Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/amba/bus.c | 348
-rw-r--r--  drivers/char/hw_random/nomadik-rng.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 123
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 122
-rw-r--r--  drivers/dma/Kconfig | 12
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/amba-pl08x.c | 2
-rw-r--r--  drivers/dma/dmatest.c | 14
-rw-r--r--  drivers/dma/dw_dmac.c | 103
-rw-r--r--  drivers/dma/dw_dmac_regs.h | 12
-rw-r--r--  drivers/dma/fsldma.c | 551
-rw-r--r--  drivers/dma/fsldma.h | 6
-rw-r--r--  drivers/dma/mxs-dma.c | 724
-rw-r--r--  drivers/dma/pch_dma.c | 35
-rw-r--r--  drivers/dma/pl330.c | 2
-rw-r--r--  drivers/dma/ste_dma40.c | 1402
-rw-r--r--  drivers/dma/ste_dma40_ll.c | 218
-rw-r--r--  drivers/dma/ste_dma40_ll.h | 66
-rw-r--r--  drivers/dma/timb_dma.c | 3
-rw-r--r--  drivers/gpio/Kconfig | 5
-rw-r--r--  drivers/gpio/Makefile | 1
-rw-r--r--  drivers/gpio/ab8500-gpio.c | 522
-rw-r--r--  drivers/gpio/janz-ttl.c | 3
-rw-r--r--  drivers/gpio/pl061.c | 2
-rw-r--r--  drivers/gpio/rdc321x-gpio.c | 3
-rw-r--r--  drivers/gpio/timbgpio.c | 6
-rw-r--r--  drivers/gpu/drm/Kconfig | 24
-rw-r--r--  drivers/gpu/drm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 11
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 5
-rw-r--r--  drivers/gpu/drm/drm_info.c | 4
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 5
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 17
-rw-r--r--  drivers/gpu/drm/drm_platform.c | 7
-rw-r--r--  drivers/gpu/drm/omap/Makefile | 8
-rw-r--r--  drivers/gpu/drm/omap/omap_connector.c | 504
-rw-r--r--  drivers/gpu/drm/omap/omap_crtc.c | 277
-rw-r--r--  drivers/gpu/drm/omap/omap_encoder.c | 198
-rw-r--r--  drivers/gpu/drm/omap/omap_fb.c | 368
-rw-r--r--  drivers/gpu/drm/omap/omap_fbdev.c | 298
-rw-r--r--  drivers/gpu/drm/omap/omap_gpu.c | 752
-rw-r--r--  drivers/gpu/drm/omap/omap_gpu_priv.h | 80
-rw-r--r--  drivers/hwmon/Kconfig | 10
-rw-r--r--  drivers/hwmon/Makefile | 1
-rw-r--r--  drivers/hwmon/jz4740-hwmon.c | 4
-rw-r--r--  drivers/hwmon/twl4030-madc-hwmon.c | 157
-rw-r--r--  drivers/hwspinlock/Kconfig | 23
-rw-r--r--  drivers/hwspinlock/Makefile | 6
-rw-r--r--  drivers/hwspinlock/hwspinlock_core.c | 548
-rw-r--r--  drivers/hwspinlock/hwspinlock_internal.h | 61
-rw-r--r--  drivers/hwspinlock/omap_hwspinlock.c | 231
-rw-r--r--  drivers/i2c/busses/i2c-ocores.c | 17
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 100
-rw-r--r--  drivers/i2c/busses/i2c-xiic.c | 3
-rw-r--r--  drivers/i2c/i2c-core.c | 2
-rw-r--r--  drivers/idle/i7300_idle.c | 5
-rw-r--r--  drivers/input/input.c | 3
-rw-r--r--  drivers/input/misc/88pm860x_onkey.c | 2
-rw-r--r--  drivers/input/misc/twl4030-vibra.c | 3
-rw-r--r--  drivers/input/serio/ambakmi.c | 3
-rw-r--r--  drivers/leds/leds-88pm860x.c | 60
-rw-r--r--  drivers/leds/leds-mc13783.c | 7
-rw-r--r--  drivers/media/radio/radio-timb.c | 3
-rw-r--r--  drivers/media/radio/radio-wl1273.c | 2
-rw-r--r--  drivers/media/video/timblogiw.c | 3
-rw-r--r--  drivers/mfd/88pm860x-core.c | 567
-rw-r--r--  drivers/mfd/88pm860x-i2c.c | 103
-rw-r--r--  drivers/mfd/Kconfig | 60
-rw-r--r--  drivers/mfd/Makefile | 8
-rw-r--r--  drivers/mfd/ab3100-core.c | 6
-rw-r--r--  drivers/mfd/ab3550-core.c | 6
-rw-r--r--  drivers/mfd/ab8500-core.c | 70
-rw-r--r--  drivers/mfd/ab8500-gpadc.c | 614
-rw-r--r--  drivers/mfd/ab8500-i2c.c | 2
-rw-r--r--  drivers/mfd/ab8500-sysctrl.c | 80
-rw-r--r--  drivers/mfd/adp5520.c | 15
-rw-r--r--  drivers/mfd/asic3.c | 10
-rw-r--r--  drivers/mfd/cs5535-mfd.c | 37
-rw-r--r--  drivers/mfd/davinci_voicecodec.c | 4
-rw-r--r--  drivers/mfd/htc-pasic3.c | 7
-rw-r--r--  drivers/mfd/janz-cmodio.c | 3
-rw-r--r--  drivers/mfd/jz4740-adc.c | 4
-rw-r--r--  drivers/mfd/max8997.c | 427
-rw-r--r--  drivers/mfd/max8998.c | 4
-rw-r--r--  drivers/mfd/mc13xxx-core.c | 21
-rw-r--r--  drivers/mfd/mfd-core.c | 135
-rw-r--r--  drivers/mfd/omap-usb-host.c | 1061
-rw-r--r--  drivers/mfd/pcf50633-core.c | 24
-rw-r--r--  drivers/mfd/rdc321x-southbridge.c | 4
-rw-r--r--  drivers/mfd/sh_mobile_sdhi.c | 4
-rw-r--r--  drivers/mfd/t7l66xb.c | 13
-rw-r--r--  drivers/mfd/tc6387xb.c | 7
-rw-r--r--  drivers/mfd/tc6393xb.c | 25
-rw-r--r--  drivers/mfd/ti-ssp.c | 476
-rw-r--r--  drivers/mfd/timberdale.c | 81
-rw-r--r--  drivers/mfd/tps6105x.c | 246
-rw-r--r--  drivers/mfd/tps6586x.c | 26
-rw-r--r--  drivers/mfd/twl-core.c | 8
-rw-r--r--  drivers/mfd/twl4030-codec.c | 6
-rw-r--r--  drivers/mfd/twl4030-madc.c | 802
-rw-r--r--  drivers/mfd/ucb1x00-ts.c | 5
-rw-r--r--  drivers/mfd/vx855.c | 1
-rw-r--r--  drivers/mfd/wl1273-core.c | 8
-rw-r--r--  drivers/mfd/wm831x-i2c.c | 18
-rw-r--r--  drivers/mfd/wm831x-irq.c | 44
-rw-r--r--  drivers/mfd/wm831x-spi.c | 24
-rw-r--r--  drivers/mfd/wm8400-core.c | 2
-rw-r--r--  drivers/mfd/wm8994-core.c | 77
-rw-r--r--  drivers/mfd/wm8994-irq.c | 14
-rw-r--r--  drivers/mmc/card/Kconfig | 3
-rw-r--r--  drivers/mmc/card/block.c | 1
-rw-r--r--  drivers/mmc/card/mmc_test.c | 271
-rw-r--r--  drivers/mmc/core/Makefile | 3
-rw-r--r--  drivers/mmc/core/core.c | 26
-rw-r--r--  drivers/mmc/core/core.h | 2
-rw-r--r--  drivers/mmc/core/host.c | 5
-rw-r--r--  drivers/mmc/core/mmc.c | 86
-rw-r--r--  drivers/mmc/core/quirks.c | 84
-rw-r--r--  drivers/mmc/core/sd.c | 1
-rw-r--r--  drivers/mmc/core/sdio.c | 1
-rw-r--r--  drivers/mmc/host/Kconfig | 13
-rw-r--r--  drivers/mmc/host/Makefile | 1
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 19
-rw-r--r--  drivers/mmc/host/cb710-mmc.c | 2
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 83
-rw-r--r--  drivers/mmc/host/dw_mmc.h | 2
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 4
-rw-r--r--  drivers/mmc/host/mmci.c | 361
-rw-r--r--  drivers/mmc/host/mmci.h | 15
-rw-r--r--  drivers/mmc/host/msm_sdcc.c | 48
-rw-r--r--  drivers/mmc/host/msm_sdcc.h | 1
-rw-r--r--  drivers/mmc/host/mxcmmc.c | 181
-rw-r--r--  drivers/mmc/host/mxs-mmc.c | 874
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 36
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 134
-rw-r--r--  drivers/mmc/host/sdhci-esdhc.h | 1
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-pci.c | 4
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c | 6
-rw-r--r--  drivers/mmc/host/sh_mmcif.c | 62
-rw-r--r--  drivers/mmc/host/tmio_mmc.c | 130
-rw-r--r--  drivers/mmc/host/via-sdmmc.c | 3
-rw-r--r--  drivers/mtd/nand/Kconfig | 19
-rw-r--r--  drivers/mtd/nand/omap2.c | 367
-rw-r--r--  drivers/mtd/nand/tmio_nand.c | 11
-rw-r--r--  drivers/mtd/onenand/Kconfig | 2
-rw-r--r--  drivers/mtd/onenand/omap2.c | 36
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/can/janz-ican3.c | 3
-rw-r--r--  drivers/net/ethoc.c | 8
-rw-r--r--  drivers/net/ks8842.c | 3
-rw-r--r--  drivers/net/usb/cdc_eem.c | 2
-rw-r--r--  drivers/net/usb/cdc_ether.c | 2
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 2
-rw-r--r--  drivers/net/usb/cdc_subset.c | 8
-rw-r--r--  drivers/net/usb/gl620a.c | 2
-rw-r--r--  drivers/net/usb/net1080.c | 2
-rw-r--r--  drivers/net/usb/plusb.c | 2
-rw-r--r--  drivers/net/usb/rndis_host.c | 2
-rw-r--r--  drivers/net/usb/smsc95xx.c | 17
-rw-r--r--  drivers/net/usb/usbnet.c | 3
-rw-r--r--  drivers/net/usb/zaurus.c | 8
-rw-r--r--  drivers/net/wan/hd64570.c | 20
-rw-r--r--  drivers/net/wan/hd64572.c | 18
-rw-r--r--  drivers/net/wireless/wl12xx/Kconfig | 4
-rw-r--r--  drivers/net/wireless/wl12xx/acx.c | 384
-rw-r--r--  drivers/net/wireless/wl12xx/acx.h | 221
-rw-r--r--  drivers/net/wireless/wl12xx/boot.c | 315
-rw-r--r--  drivers/net/wireless/wl12xx/boot.h | 57
-rw-r--r--  drivers/net/wireless/wl12xx/cmd.c | 423
-rw-r--r--  drivers/net/wireless/wl12xx/cmd.h | 195
-rw-r--r--  drivers/net/wireless/wl12xx/conf.h | 169
-rw-r--r--  drivers/net/wireless/wl12xx/debugfs.c | 185
-rw-r--r--  drivers/net/wireless/wl12xx/event.c | 87
-rw-r--r--  drivers/net/wireless/wl12xx/event.h | 25
-rw-r--r--  drivers/net/wireless/wl12xx/ini.h | 98
-rw-r--r--  drivers/net/wireless/wl12xx/init.c | 444
-rw-r--r--  drivers/net/wireless/wl12xx/init.h | 3
-rw-r--r--  drivers/net/wireless/wl12xx/io.c | 11
-rw-r--r--  drivers/net/wireless/wl12xx/io.h | 4
-rw-r--r--  drivers/net/wireless/wl12xx/main.c | 1790
-rw-r--r--  drivers/net/wireless/wl12xx/ps.c | 125
-rw-r--r--  drivers/net/wireless/wl12xx/ps.h | 4
-rw-r--r--  drivers/net/wireless/wl12xx/reg.h | 15
-rw-r--r--  drivers/net/wireless/wl12xx/rx.c | 118
-rw-r--r--  drivers/net/wireless/wl12xx/rx.h | 17
-rw-r--r--  drivers/net/wireless/wl12xx/scan.c | 37
-rw-r--r--  drivers/net/wireless/wl12xx/sdio.c | 80
-rw-r--r--  drivers/net/wireless/wl12xx/sdio_test.c | 20
-rw-r--r--  drivers/net/wireless/wl12xx/spi.c | 36
-rw-r--r--  drivers/net/wireless/wl12xx/testmode.c | 6
-rw-r--r--  drivers/net/wireless/wl12xx/tx.c | 540
-rw-r--r--  drivers/net/wireless/wl12xx/tx.h | 75
-rw-r--r--  drivers/net/wireless/wl12xx/wl12xx.h | 263
-rw-r--r--  drivers/net/wireless/wl12xx/wl12xx_80211.h | 11
-rw-r--r--  drivers/of/address.c | 14
-rw-r--r--  drivers/of/base.c | 3
-rw-r--r--  drivers/of/platform.c | 347
-rw-r--r--  drivers/power/jz4740-battery.c | 4
-rw-r--r--  drivers/regulator/88pm8607.c | 46
-rw-r--r--  drivers/regulator/Kconfig | 20
-rw-r--r--  drivers/regulator/Makefile | 3
-rw-r--r--  drivers/regulator/ab3100.c | 54
-rw-r--r--  drivers/regulator/ab8500.c | 270
-rw-r--r--  drivers/regulator/core.c | 116
-rw-r--r--  drivers/regulator/max8997.c | 1213
-rw-r--r--  drivers/regulator/mc13783-regulator.c | 7
-rw-r--r--  drivers/regulator/mc13892-regulator.c | 7
-rw-r--r--  drivers/regulator/tps6105x-regulator.c | 196
-rw-r--r--  drivers/regulator/twl-regulator.c | 24
-rw-r--r--  drivers/regulator/wm831x-dcdc.c | 26
-rw-r--r--  drivers/regulator/wm831x-isink.c | 8
-rw-r--r--  drivers/regulator/wm831x-ldo.c | 17
-rw-r--r--  drivers/rtc/rtc-pl030.c | 2
-rw-r--r--  drivers/rtc/rtc-pl031.c | 2
-rw-r--r--  drivers/spi/Kconfig | 16
-rw-r--r--  drivers/spi/Makefile | 1
-rw-r--r--  drivers/spi/amba-pl022.c | 2
-rw-r--r--  drivers/spi/davinci_spi.c | 11
-rw-r--r--  drivers/spi/omap2_mcspi.c | 222
-rw-r--r--  drivers/spi/pxa2xx_spi.c | 2
-rw-r--r--  drivers/spi/pxa2xx_spi_pci.c | 2
-rw-r--r--  drivers/spi/ti-ssp-spi.c | 402
-rw-r--r--  drivers/spi/xilinx_spi.c | 9
-rw-r--r--  drivers/staging/usbip/vhci_hcd.c | 4
-rw-r--r--  drivers/tty/serial/Kconfig | 19
-rw-r--r--  drivers/tty/serial/Makefile | 1
-rw-r--r--  drivers/tty/serial/amba-pl010.c | 2
-rw-r--r--  drivers/tty/serial/amba-pl011.c | 513
-rw-r--r--  drivers/tty/serial/msm_serial.c | 286
-rw-r--r--  drivers/tty/serial/msm_serial.h | 28
-rw-r--r--  drivers/tty/serial/mxs-auart.c | 798
-rw-r--r--  drivers/tty/serial/of_serial.c | 18
-rw-r--r--  drivers/tty/serial/omap-serial.c | 5
-rw-r--r--  drivers/usb/atm/ueagle-atm.c | 19
-rw-r--r--  drivers/usb/core/buffer.c | 26
-rw-r--r--  drivers/usb/core/driver.c | 5
-rw-r--r--  drivers/usb/core/hcd-pci.c | 47
-rw-r--r--  drivers/usb/core/hcd.c | 226
-rw-r--r--  drivers/usb/core/hub.c | 199
-rw-r--r--  drivers/usb/core/message.c | 22
-rw-r--r--  drivers/usb/core/usb.h | 13
-rw-r--r--  drivers/usb/early/ehci-dbgp.c | 2
-rw-r--r--  drivers/usb/gadget/Kconfig | 14
-rw-r--r--  drivers/usb/gadget/Makefile | 1
-rw-r--r--  drivers/usb/gadget/at91_udc.c | 4
-rw-r--r--  drivers/usb/gadget/ci13xxx_udc.c | 344
-rw-r--r--  drivers/usb/gadget/ci13xxx_udc.h | 9
-rw-r--r--  drivers/usb/gadget/composite.c | 14
-rw-r--r--  drivers/usb/gadget/dummy_hcd.c | 4
-rw-r--r--  drivers/usb/gadget/epautoconf.c | 7
-rw-r--r--  drivers/usb/gadget/f_fs.c | 8
-rw-r--r--  drivers/usb/gadget/fsl_mxc_udc.c | 21
-rw-r--r--  drivers/usb/gadget/fsl_udc_core.c | 2
-rw-r--r--  drivers/usb/gadget/fusb300_udc.c | 1744
-rw-r--r--  drivers/usb/gadget/fusb300_udc.h | 687
-rw-r--r--  drivers/usb/gadget/m66592-udc.c | 2
-rw-r--r--  drivers/usb/gadget/pch_udc.c | 178
-rw-r--r--  drivers/usb/gadget/s3c2410_udc.c | 76
-rw-r--r--  drivers/usb/gadget/u_ether.c | 2
-rw-r--r--  drivers/usb/host/Kconfig | 39
-rw-r--r--  drivers/usb/host/ehci-atmel.c | 6
-rw-r--r--  drivers/usb/host/ehci-dbg.c | 4
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 29
-rw-r--r--  drivers/usb/host/ehci-hub.c | 9
-rw-r--r--  drivers/usb/host/ehci-lpm.c | 5
-rw-r--r--  drivers/usb/host/ehci-msm.c | 96
-rw-r--r--  drivers/usb/host/ehci-mxc.c | 5
-rw-r--r--  drivers/usb/host/ehci-omap.c | 882
-rw-r--r--  drivers/usb/host/ehci-orion.c | 5
-rw-r--r--  drivers/usb/host/ehci-pci.c | 45
-rw-r--r--  drivers/usb/host/ehci-pmcmsp.c | 383
-rw-r--r--  drivers/usb/host/ehci-q.c | 18
-rw-r--r--  drivers/usb/host/ehci-sched.c | 76
-rw-r--r--  drivers/usb/host/ehci-tegra.c | 715
-rw-r--r--  drivers/usb/host/ehci.h | 2
-rw-r--r--  drivers/usb/host/imx21-hcd.c | 9
-rw-r--r--  drivers/usb/host/isp116x-hcd.c | 6
-rw-r--r--  drivers/usb/host/isp1362-hcd.c | 13
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 1383
-rw-r--r--  drivers/usb/host/isp1760-hcd.h | 55
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 18
-rw-r--r--  drivers/usb/host/ohci-hub.c | 13
-rw-r--r--  drivers/usb/host/ohci-omap3.c | 584
-rw-r--r--  drivers/usb/host/ohci-pci.c | 114
-rw-r--r--  drivers/usb/host/ohci-q.c | 4
-rw-r--r--  drivers/usb/host/ohci-tmio.c | 8
-rw-r--r--  drivers/usb/host/ohci.h | 4
-rw-r--r--  drivers/usb/host/oxu210hp-hcd.c | 6
-rw-r--r--  drivers/usb/host/pci-quirks.c | 258
-rw-r--r--  drivers/usb/host/pci-quirks.h | 10
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 5
-rw-r--r--  drivers/usb/host/sl811-hcd.c | 6
-rw-r--r--  drivers/usb/host/u132-hcd.c | 11
-rw-r--r--  drivers/usb/host/uhci-hcd.c | 2
-rw-r--r--  drivers/usb/host/xhci-ext-caps.h | 4
-rw-r--r--  drivers/usb/host/xhci-hub.c | 392
-rw-r--r--  drivers/usb/host/xhci-mem.c | 93
-rw-r--r--  drivers/usb/host/xhci-pci.c | 124
-rw-r--r--  drivers/usb/host/xhci-ring.c | 152
-rw-r--r--  drivers/usb/host/xhci.c | 129
-rw-r--r--  drivers/usb/host/xhci.h | 44
-rw-r--r--  drivers/usb/misc/usbtest.c | 236
-rw-r--r--  drivers/usb/mon/mon_text.c | 3
-rw-r--r--  drivers/usb/musb/musb_core.c | 81
-rw-r--r--  drivers/usb/musb/musb_core.h | 12
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 217
-rw-r--r--  drivers/usb/musb/musb_gadget.h | 7
-rw-r--r--  drivers/usb/musb/musb_gadget_ep0.c | 24
-rw-r--r--  drivers/usb/musb/musb_host.c | 4
-rw-r--r--  drivers/usb/musb/musb_virthub.c | 4
-rw-r--r--  drivers/usb/musb/musbhsdma.h | 2
-rw-r--r--  drivers/usb/musb/omap2430.c | 178
-rw-r--r--  drivers/usb/musb/tusb6010_omap.c | 4
-rw-r--r--  drivers/usb/otg/Kconfig | 9
-rw-r--r--  drivers/usb/otg/Makefile | 3
-rw-r--r--  drivers/usb/otg/ab8500-usb.c | 6
-rw-r--r--  drivers/usb/otg/isp1301_omap.c | 2
-rw-r--r--  drivers/usb/otg/langwell_otg.c | 48
-rw-r--r--  drivers/usb/otg/msm_otg.c (renamed from drivers/usb/otg/msm72k_otg.c) | 31
-rw-r--r--  drivers/usb/otg/nop-usb-xceiv.c | 2
-rw-r--r--  drivers/usb/otg/twl4030-usb.c | 8
-rw-r--r--  drivers/usb/otg/twl6030-usb.c | 56
-rw-r--r--  drivers/usb/otg/ulpi_viewport.c | 80
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 4
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 1
-rw-r--r--  drivers/usb/serial/io_edgeport.c | 3
-rw-r--r--  drivers/usb/serial/keyspan.c | 10
-rw-r--r--  drivers/usb/serial/keyspan_pda.c | 15
-rw-r--r--  drivers/usb/serial/mct_u232.c | 107
-rw-r--r--  drivers/usb/serial/mos7720.c | 3
-rw-r--r--  drivers/usb/serial/opticon.c | 156
-rw-r--r--  drivers/usb/serial/sierra.c | 16
-rw-r--r--  drivers/usb/serial/usb_wwan.c | 43
-rw-r--r--  drivers/usb/storage/Kconfig | 10
-rw-r--r--  drivers/usb/storage/Makefile | 2
-rw-r--r--  drivers/usb/storage/realtek_cr.c | 675
-rw-r--r--  drivers/usb/storage/sierra_ms.c | 2
-rw-r--r--  drivers/usb/storage/unusual_realtek.h | 41
-rw-r--r--  drivers/usb/storage/usual-tables.c | 1
-rw-r--r--  drivers/usb/wusbcore/rh.c | 6
-rw-r--r--  drivers/usb/wusbcore/wusbhc.c | 2
-rw-r--r--  drivers/usb/wusbcore/wusbhc.h | 2
-rw-r--r--  drivers/uwb/scan.c | 2
-rw-r--r--  drivers/video/Kconfig | 9
-rw-r--r--  drivers/video/Makefile | 1
-rw-r--r--  drivers/video/amba-clcd.c | 105
-rw-r--r--  drivers/video/backlight/88pm860x_bl.c | 34
-rw-r--r--  drivers/video/cfbfillrect.c | 2
-rw-r--r--  drivers/video/cfbimgblt.c | 5
-rw-r--r--  drivers/video/edid.h | 4
-rw-r--r--  drivers/video/fb_draw.h | 14
-rw-r--r--  drivers/video/msm/mdp_hw.h | 11
-rw-r--r--  drivers/video/msm/mdp_ppp.c | 1
-rw-r--r--  drivers/video/msm/msm_fb.c | 27
-rw-r--r--  drivers/video/mxsfb.c | 919
-rw-r--r--  drivers/video/omap/Kconfig | 7
-rw-r--r--  drivers/video/omap2/displays/Kconfig | 6
-rw-r--r--  drivers/video/omap2/displays/Makefile | 1
-rw-r--r--  drivers/video/omap2/displays/panel-generic-dpi.c | 124
-rw-r--r--  drivers/video/omap2/displays/panel-lgphilips-lb035q02.c | 279
-rw-r--r--  drivers/video/omap2/displays/panel-taal.c | 123
-rw-r--r--  drivers/video/omap2/dss/Kconfig | 15
-rw-r--r--  drivers/video/omap2/dss/Makefile | 2
-rw-r--r--  drivers/video/omap2/dss/core.c | 489
-rw-r--r--  drivers/video/omap2/dss/dispc.c | 335
-rw-r--r--  drivers/video/omap2/dss/display.c | 109
-rw-r--r--  drivers/video/omap2/dss/dpi.c | 48
-rw-r--r--  drivers/video/omap2/dss/dsi.c | 1027
-rw-r--r--  drivers/video/omap2/dss/dss.c | 763
-rw-r--r--  drivers/video/omap2/dss/dss.h | 153
-rw-r--r--  drivers/video/omap2/dss/dss_features.c | 172
-rw-r--r--  drivers/video/omap2/dss/dss_features.h | 47
-rw-r--r--  drivers/video/omap2/dss/hdmi.c | 1332
-rw-r--r--  drivers/video/omap2/dss/hdmi.h | 415
-rw-r--r--  drivers/video/omap2/dss/hdmi_omap4_panel.c | 222
-rw-r--r--  drivers/video/omap2/dss/manager.c | 13
-rw-r--r--  drivers/video/omap2/dss/overlay.c | 10
-rw-r--r--  drivers/video/omap2/dss/rfbi.c | 128
-rw-r--r--  drivers/video/omap2/dss/sdi.c | 62
-rw-r--r--  drivers/video/omap2/dss/venc.c | 128
-rw-r--r--  drivers/video/omap2/omapfb/Kconfig | 8
-rw-r--r--  drivers/video/omap2/omapfb/omapfb-main.c | 23
-rw-r--r--  drivers/video/sysfillrect.c | 2
-rw-r--r--  drivers/video/sysimgblt.c | 6
-rw-r--r--  drivers/video/tmiofb.c | 28
-rw-r--r--  drivers/w1/masters/Kconfig | 2
-rw-r--r--  drivers/w1/masters/ds1wm.c | 13
-rw-r--r--  drivers/watchdog/omap_wdt.c | 25
-rw-r--r--  drivers/watchdog/rdc321x_wdt.c | 3
-rw-r--r--  drivers/watchdog/sp805_wdt.c | 2
395 files changed, 36579 insertions, 8991 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9bfb71ff3a6..177c7d15693 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -117,4 +117,6 @@ source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
source "drivers/clk/Kconfig"
+
+source "drivers/hwspinlock/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index b423bb16c3a..3f135b6fb01 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -117,3 +117,5 @@ obj-y += platform/
obj-y += ieee802154/
#common clk code
obj-y += clk/
+
+obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index e7df019d29d..6d2bb2524b6 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -13,16 +13,17 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/amba/bus.h>
#include <asm/irq.h>
#include <asm/sizes.h>
-#define to_amba_device(d) container_of(d, struct amba_device, dev)
#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
-static struct amba_id *
-amba_lookup(struct amba_id *table, struct amba_device *dev)
+static const struct amba_id *
+amba_lookup(const struct amba_id *table, struct amba_device *dev)
{
int ret = 0;
@@ -57,26 +58,6 @@ static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
#define amba_uevent NULL
#endif
-static int amba_suspend(struct device *dev, pm_message_t state)
-{
- struct amba_driver *drv = to_amba_driver(dev->driver);
- int ret = 0;
-
- if (dev->driver && drv->suspend)
- ret = drv->suspend(to_amba_device(dev), state);
- return ret;
-}
-
-static int amba_resume(struct device *dev)
-{
- struct amba_driver *drv = to_amba_driver(dev->driver);
- int ret = 0;
-
- if (dev->driver && drv->resume)
- ret = drv->resume(to_amba_device(dev));
- return ret;
-}
-
#define amba_attr_func(name,fmt,arg...) \
static ssize_t name##_show(struct device *_dev, \
struct device_attribute *attr, char *buf) \
@@ -102,17 +83,330 @@ static struct device_attribute amba_dev_attrs[] = {
__ATTR_NULL,
};
+#ifdef CONFIG_PM_SLEEP
+
+static int amba_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+ struct amba_driver *adrv = to_amba_driver(dev->driver);
+ struct amba_device *adev = to_amba_device(dev);
+ int ret = 0;
+
+ if (dev->driver && adrv->suspend)
+ ret = adrv->suspend(adev, mesg);
+
+ return ret;
+}
+
+static int amba_legacy_resume(struct device *dev)
+{
+ struct amba_driver *adrv = to_amba_driver(dev->driver);
+ struct amba_device *adev = to_amba_device(dev);
+ int ret = 0;
+
+ if (dev->driver && adrv->resume)
+ ret = adrv->resume(adev);
+
+ return ret;
+}
+
+static int amba_pm_prepare(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm && drv->pm->prepare)
+ ret = drv->pm->prepare(dev);
+
+ return ret;
+}
+
+static void amba_pm_complete(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->complete)
+ drv->pm->complete(dev);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define amba_pm_prepare NULL
+#define amba_pm_complete NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+
+static int amba_pm_suspend(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend)
+ ret = drv->pm->suspend(dev);
+ } else {
+ ret = amba_legacy_suspend(dev, PMSG_SUSPEND);
+ }
+
+ return ret;
+}
+
+static int amba_pm_suspend_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend_noirq)
+ ret = drv->pm->suspend_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_resume(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume)
+ ret = drv->pm->resume(dev);
+ } else {
+ ret = amba_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_resume_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume_noirq)
+ ret = drv->pm->resume_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define amba_pm_suspend NULL
+#define amba_pm_resume NULL
+#define amba_pm_suspend_noirq NULL
+#define amba_pm_resume_noirq NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+
+static int amba_pm_freeze(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze)
+ ret = drv->pm->freeze(dev);
+ } else {
+ ret = amba_legacy_suspend(dev, PMSG_FREEZE);
+ }
+
+ return ret;
+}
+
+static int amba_pm_freeze_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze_noirq)
+ ret = drv->pm->freeze_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_thaw(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw)
+ ret = drv->pm->thaw(dev);
+ } else {
+ ret = amba_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_thaw_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw_noirq)
+ ret = drv->pm->thaw_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_poweroff(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff)
+ ret = drv->pm->poweroff(dev);
+ } else {
+ ret = amba_legacy_suspend(dev, PMSG_HIBERNATE);
+ }
+
+ return ret;
+}
+
+static int amba_pm_poweroff_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff_noirq)
+ ret = drv->pm->poweroff_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_restore(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore)
+ ret = drv->pm->restore(dev);
+ } else {
+ ret = amba_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int amba_pm_restore_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore_noirq)
+ ret = drv->pm->restore_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_HIBERNATION */
+
+#define amba_pm_freeze NULL
+#define amba_pm_thaw NULL
+#define amba_pm_poweroff NULL
+#define amba_pm_restore NULL
+#define amba_pm_freeze_noirq NULL
+#define amba_pm_thaw_noirq NULL
+#define amba_pm_poweroff_noirq NULL
+#define amba_pm_restore_noirq NULL
+
+#endif /* !CONFIG_HIBERNATION */
+
+#ifdef CONFIG_PM
+
+static const struct dev_pm_ops amba_pm = {
+ .prepare = amba_pm_prepare,
+ .complete = amba_pm_complete,
+ .suspend = amba_pm_suspend,
+ .resume = amba_pm_resume,
+ .freeze = amba_pm_freeze,
+ .thaw = amba_pm_thaw,
+ .poweroff = amba_pm_poweroff,
+ .restore = amba_pm_restore,
+ .suspend_noirq = amba_pm_suspend_noirq,
+ .resume_noirq = amba_pm_resume_noirq,
+ .freeze_noirq = amba_pm_freeze_noirq,
+ .thaw_noirq = amba_pm_thaw_noirq,
+ .poweroff_noirq = amba_pm_poweroff_noirq,
+ .restore_noirq = amba_pm_restore_noirq,
+ SET_RUNTIME_PM_OPS(
+ pm_generic_runtime_suspend,
+ pm_generic_runtime_resume,
+ pm_generic_runtime_idle
+ )
+};
+
+#define AMBA_PM (&amba_pm)
+
+#else /* !CONFIG_PM */
+
+#define AMBA_PM NULL
+
+#endif /* !CONFIG_PM */
+
/*
* Primecells are part of the Advanced Microcontroller Bus Architecture,
* so we call the bus "amba".
*/
-static struct bus_type amba_bustype = {
+struct bus_type amba_bustype = {
.name = "amba",
.dev_attrs = amba_dev_attrs,
.match = amba_match,
.uevent = amba_uevent,
- .suspend = amba_suspend,
- .resume = amba_resume,
+ .pm = AMBA_PM,
};
static int __init amba_init(void)
@@ -188,7 +482,7 @@ static int amba_probe(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *pcdrv = to_amba_driver(dev->driver);
- struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev);
+ const struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev);
int ret;
do {
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index a348c7e9aa0..dd1d143eb8e 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -39,7 +39,7 @@ static struct hwrng nmk_rng = {
.read = nmk_rng_read,
};
-static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id)
+static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
{
void __iomem *base;
int ret;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5cb4d09919d..0f17ad8585d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1371,7 +1371,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
goto out;
if (cpufreq_driver->suspend) {
- ret = cpufreq_driver->suspend(cpu_policy, pmsg);
+ ret = cpufreq_driver->suspend(cpu_policy);
if (ret)
printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
"step on CPU %u\n", cpu_policy->cpu);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 526bfbf6961..af434acb850 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -76,8 +76,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
/*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
*/
static DEFINE_MUTEX(dbs_mutex);
@@ -118,7 +117,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
if (wall)
*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
- return (cputime64_t)jiffies_to_usecs(idle_time);;
+ return (cputime64_t)jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -164,21 +163,12 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
};
/************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
- "sysfs file is deprecated - used by: %s\n", current->comm);
- return sprintf(buf, "%u\n", -1U);
-}
-
static ssize_t show_sampling_rate_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", min_sampling_rate);
}
-define_one_global_ro(sampling_rate_max);
define_one_global_ro(sampling_rate_min);
/* cpufreq_conservative Governor Tunables */
@@ -195,33 +185,6 @@ show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);
-/*** delete after deprecation time ***/
-#define DEPRECATION_MSG(file_name) \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(sampling_down_factor);
-show_one_old(up_threshold);
-show_one_old(down_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(freq_step);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
static ssize_t store_sampling_down_factor(struct kobject *a,
struct attribute *b,
const char *buf, size_t count)
@@ -233,10 +196,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_down_factor = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -250,10 +210,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -264,16 +221,11 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
int ret;
ret = sscanf(buf, "%u", &input);
- mutex_lock(&dbs_mutex);
if (ret != 1 || input > 100 ||
- input <= dbs_tuners_ins.down_threshold) {
- mutex_unlock(&dbs_mutex);
+ input <= dbs_tuners_ins.down_threshold)
return -EINVAL;
- }
dbs_tuners_ins.up_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -284,17 +236,12 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
int ret;
ret = sscanf(buf, "%u", &input);
- mutex_lock(&dbs_mutex);
/* cannot be lower than 11 otherwise freq will not fall */
if (ret != 1 || input < 11 || input > 100 ||
- input >= dbs_tuners_ins.up_threshold) {
- mutex_unlock(&dbs_mutex);
+ input >= dbs_tuners_ins.up_threshold)
return -EINVAL;
- }
dbs_tuners_ins.down_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -313,11 +260,9 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- mutex_lock(&dbs_mutex);
- if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
- mutex_unlock(&dbs_mutex);
+ if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
return count;
- }
+
dbs_tuners_ins.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle */
@@ -329,8 +274,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (dbs_tuners_ins.ignore_nice)
dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -349,10 +292,7 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
/* no need to test here if freq_step is zero as the user might actually
* want this, they would be crazy though :) */
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.freq_step = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -364,7 +304,6 @@ define_one_global_rw(ignore_nice_load);
define_one_global_rw(freq_step);
static struct attribute *dbs_attributes[] = {
- &sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
&sampling_down_factor.attr,
@@ -380,49 +319,6 @@ static struct attribute_group dbs_attr_group = {
.name = "conservative",
};
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(sampling_down_factor);
-write_one_old(up_threshold);
-write_one_old(down_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(freq_step);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(sampling_down_factor);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(down_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(freq_step);
-
-static struct attribute *dbs_attributes_old[] = {
- &sampling_rate_max_old.attr,
- &sampling_rate_min_old.attr,
- &sampling_rate_old.attr,
- &sampling_down_factor_old.attr,
- &up_threshold_old.attr,
- &down_threshold_old.attr,
- &ignore_nice_load_old.attr,
- &freq_step_old.attr,
- NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
- .attrs = dbs_attributes_old,
- .name = "conservative",
-};
-
-/*** delete after deprecation time ***/
-
/************************** sysfs end ************************/
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
@@ -599,12 +495,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
- rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
@@ -667,7 +557,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_exit(this_dbs_info);
mutex_lock(&dbs_mutex);
- sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
dbs_enable--;
mutex_destroy(&this_dbs_info->timer_mutex);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c631f27a3dc..2e358733777 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -99,8 +99,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
/*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
*/
static DEFINE_MUTEX(dbs_mutex);
@@ -237,21 +236,12 @@ static void ondemand_powersave_bias_init(void)
/************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
- "sysfs file is deprecated - used by: %s\n", current->comm);
- return sprintf(buf, "%u\n", -1U);
-}
-
static ssize_t show_sampling_rate_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", min_sampling_rate);
}
-define_one_global_ro(sampling_rate_max);
define_one_global_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
@@ -268,32 +258,6 @@ show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
-/*** delete after deprecation time ***/
-
-#define DEPRECATION_MSG(file_name) \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(up_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(powersave_bias);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -302,11 +266,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -319,11 +279,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.io_is_busy = !!input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -338,11 +294,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.up_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -355,7 +307,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
@@ -364,8 +315,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
dbs_info = &per_cpu(od_cpu_dbs_info, j);
dbs_info->rate_mult = 1;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -384,9 +333,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- mutex_lock(&dbs_mutex);
if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
- mutex_unlock(&dbs_mutex);
return count;
}
dbs_tuners_ins.ignore_nice = input;
@@ -401,8 +348,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -419,11 +364,8 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
if (input > 1000)
input = 1000;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.powersave_bias = input;
ondemand_powersave_bias_init();
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -435,7 +377,6 @@ define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
static struct attribute *dbs_attributes[] = {
- &sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
&up_threshold.attr,
@@ -451,43 +392,6 @@ static struct attribute_group dbs_attr_group = {
.name = "ondemand",
};
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(up_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(powersave_bias);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(powersave_bias);
-
-static struct attribute *dbs_attributes_old[] = {
- &sampling_rate_max_old.attr,
- &sampling_rate_min_old.attr,
- &sampling_rate_old.attr,
- &up_threshold_old.attr,
- &ignore_nice_load_old.attr,
- &powersave_bias_old.attr,
- NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
- .attrs = dbs_attributes_old,
- .name = "ondemand",
-};
-
-/*** delete after deprecation time ***/
-
/************************** sysfs end ************************/
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
@@ -644,12 +548,7 @@ static void do_dbs_timer(struct work_struct *work)
unsigned int cpu = dbs_info->cpu;
int sample_type = dbs_info->sample_type;
- /* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
- * dbs_info->rate_mult);
-
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
+ int delay;
mutex_lock(&dbs_info->timer_mutex);
@@ -662,10 +561,20 @@ static void do_dbs_timer(struct work_struct *work)
/* Setup timer for SUB_SAMPLE */
dbs_info->sample_type = DBS_SUB_SAMPLE;
delay = dbs_info->freq_hi_jiffies;
+ } else {
+ /* We want all CPUs to do sampling nearly on
+ * same jiffy
+ */
+ delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
+ * dbs_info->rate_mult);
+
+ if (num_online_cpus() > 1)
+ delay -= jiffies % delay;
}
} else {
__cpufreq_driver_target(dbs_info->cur_policy,
dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ delay = dbs_info->freq_lo_jiffies;
}
queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
mutex_unlock(&dbs_info->timer_mutex);
@@ -730,12 +639,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
- rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
dbs_enable++;
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
@@ -788,7 +691,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_exit(this_dbs_info);
mutex_lock(&dbs_mutex);
- sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
mutex_destroy(&this_dbs_info->timer_mutex);
dbs_enable--;
mutex_unlock(&dbs_mutex);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 1c28816152f..a572600e44e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -82,7 +82,7 @@ config INTEL_IOP_ADMA
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA support"
- depends on AVR32
+ depends on HAVE_CLK
select DMA_ENGINE
default y if CPU_AT32AP7000
help
@@ -221,12 +221,20 @@ config IMX_SDMA
config IMX_DMA
tristate "i.MX DMA support"
- depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27
+ depends on IMX_HAVE_DMA_V1
select DMA_ENGINE
help
Support the i.MX DMA engine. This engine is integrated into
Freescale i.MX1/21/27 chips.
+config MXS_DMA
+ bool "MXS DMA support"
+ depends on SOC_IMX23 || SOC_IMX28
+ select DMA_ENGINE
+ help
+ Support the MXS DMA engine. This engine including APBH-DMA
+ and APBX-DMA is integrated into Freescale i.MX23/28 chips.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 64b21f5cd74..802b55795f8 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
+obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 07bca4970e5..e6d7228b147 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1845,7 +1845,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
}
#endif
-static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
+static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
struct pl08x_driver_data *pl08x;
const struct vendor_data *vd = id->data;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 5589358b684..e0888cb538d 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -54,6 +54,11 @@ module_param(pq_sources, uint, S_IRUGO);
MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
+static int timeout = 3000;
+module_param(timeout, uint, S_IRUGO);
+MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), \
+ Pass -1 for infinite timeout");
+
/*
* Initialization patterns. All bytes in the source buffer has bit 7
* set, all bytes in the destination buffer has bit 7 cleared.
@@ -285,7 +290,12 @@ static int dmatest_func(void *data)
set_user_nice(current, 10);
- flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT;
+ /*
+ * src buffers are freed by the DMAEngine code with dma_unmap_single()
+ * dst buffers are freed by ourselves below
+ */
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
+ | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
while (!kthread_should_stop()
&& !(iterations && total_tests >= iterations)) {
@@ -294,7 +304,7 @@ static int dmatest_func(void *data)
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
struct completion cmp;
- unsigned long tmo = msecs_to_jiffies(3000);
+ unsigned long tmo = msecs_to_jiffies(timeout);
u8 align = 0;
total_tests++;
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a3991ab0d67..9c25c7d099e 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -32,26 +32,30 @@
* which does not support descriptor writeback.
*/
-/* NOTE: DMS+SMS is system-specific. We should get this information
- * from the platform code somehow.
- */
-#define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \
- | DWC_CTLL_SRC_MSIZE(0) \
- | DWC_CTLL_DMS(0) \
- | DWC_CTLL_SMS(1) \
- | DWC_CTLL_LLP_D_EN \
- | DWC_CTLL_LLP_S_EN)
+#define DWC_DEFAULT_CTLLO(private) ({ \
+ struct dw_dma_slave *__slave = (private); \
+ int dms = __slave ? __slave->dst_master : 0; \
+ int sms = __slave ? __slave->src_master : 1; \
+ u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
+ u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
+ \
+ (DWC_CTLL_DST_MSIZE(dmsize) \
+ | DWC_CTLL_SRC_MSIZE(smsize) \
+ | DWC_CTLL_LLP_D_EN \
+ | DWC_CTLL_LLP_S_EN \
+ | DWC_CTLL_DMS(dms) \
+ | DWC_CTLL_SMS(sms)); \
+ })
/*
* This is configuration-dependent and usually a funny size like 4095.
- * Let's round it down to the nearest power of two.
*
* Note that this is a transfer count, i.e. if we transfer 32-bit
- * words, we can do 8192 bytes per descriptor.
+ * words, we can do 16380 bytes per descriptor.
*
* This parameter is also system-specific.
*/
-#define DWC_MAX_COUNT 2048U
+#define DWC_MAX_COUNT 4095U
/*
* Number of descriptors to allocate for each channel. This should be
@@ -84,11 +88,6 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}
-static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
-{
- return list_entry(dwc->queue.next, struct dw_desc, desc_node);
-}
-
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
struct dw_desc *desc, *_desc;
@@ -201,6 +200,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
dma_async_tx_callback callback;
void *param;
struct dma_async_tx_descriptor *txd = &desc->txd;
+ struct dw_desc *child;
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
@@ -209,6 +209,12 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
param = txd->callback_param;
dwc_sync_desc_for_cpu(dwc, desc);
+
+ /* async_tx_ack */
+ list_for_each_entry(child, &desc->tx_list, desc_node)
+ async_tx_ack(&child->txd);
+ async_tx_ack(&desc->txd);
+
list_splice_init(&desc->tx_list, &dwc->free_list);
list_move(&desc->desc_node, &dwc->free_list);
@@ -259,10 +265,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
* Submit queued descriptors ASAP, i.e. before we go through
* the completed ones.
*/
- if (!list_empty(&dwc->queue))
- dwc_dostart(dwc, dwc_first_queued(dwc));
list_splice_init(&dwc->active_list, &list);
- list_splice_init(&dwc->queue, &dwc->active_list);
+ if (!list_empty(&dwc->queue)) {
+ list_move(dwc->queue.next, &dwc->active_list);
+ dwc_dostart(dwc, dwc_first_active(dwc));
+ }
list_for_each_entry_safe(desc, _desc, &list, desc_node)
dwc_descriptor_complete(dwc, desc);
@@ -291,6 +298,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
return;
}
+ if (list_empty(&dwc->active_list))
+ return;
+
dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
@@ -319,8 +329,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
cpu_relax();
if (!list_empty(&dwc->queue)) {
- dwc_dostart(dwc, dwc_first_queued(dwc));
- list_splice_init(&dwc->queue, &dwc->active_list);
+ list_move(dwc->queue.next, &dwc->active_list);
+ dwc_dostart(dwc, dwc_first_active(dwc));
}
}
@@ -346,7 +356,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
*/
bad_desc = dwc_first_active(dwc);
list_del_init(&bad_desc->desc_node);
- list_splice_init(&dwc->queue, dwc->active_list.prev);
+ list_move(dwc->queue.next, dwc->active_list.prev);
/* Clear the error flag and try to restart the controller */
dma_writel(dw, CLEAR.ERROR, dwc->mask);
@@ -541,8 +551,8 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
if (list_empty(&dwc->active_list)) {
dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
desc->txd.cookie);
- dwc_dostart(dwc, desc);
list_add_tail(&desc->desc_node, &dwc->active_list);
+ dwc_dostart(dwc, dwc_first_active(dwc));
} else {
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
desc->txd.cookie);
@@ -581,14 +591,16 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
* We can be a lot more clever here, but this should take care
* of the most common optimization.
*/
- if (!((src | dest | len) & 3))
+ if (!((src | dest | len) & 7))
+ src_width = dst_width = 3;
+ else if (!((src | dest | len) & 3))
src_width = dst_width = 2;
else if (!((src | dest | len) & 1))
src_width = dst_width = 1;
else
src_width = dst_width = 0;
- ctllo = DWC_DEFAULT_CTLLO
+ ctllo = DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_DST_WIDTH(dst_width)
| DWC_CTLL_SRC_WIDTH(src_width)
| DWC_CTLL_DST_INC
@@ -669,11 +681,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
switch (direction) {
case DMA_TO_DEVICE:
- ctllo = (DWC_DEFAULT_CTLLO
+ ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
| DWC_CTLL_SRC_INC
- | DWC_CTLL_FC_M2P);
+ | DWC_CTLL_FC(dws->fc));
reg = dws->tx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
@@ -714,11 +726,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
break;
case DMA_FROM_DEVICE:
- ctllo = (DWC_DEFAULT_CTLLO
+ ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_INC
| DWC_CTLL_SRC_FIX
- | DWC_CTLL_FC_P2M);
+ | DWC_CTLL_FC(dws->fc));
reg = dws->rx_reg;
for_each_sg(sgl, sg, sg_len, i) {
@@ -834,7 +846,9 @@ dwc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret != DMA_SUCCESS) {
+ spin_lock_bh(&dwc->lock);
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+ spin_unlock_bh(&dwc->lock);
last_complete = dwc->completed;
last_used = chan->cookie;
@@ -889,8 +903,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
cfghi = dws->cfg_hi;
- cfglo = dws->cfg_lo;
+ cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
}
+
+ cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
+
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
@@ -1126,23 +1143,23 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
case DMA_TO_DEVICE:
desc->lli.dar = dws->tx_reg;
desc->lli.sar = buf_addr + (period_len * i);
- desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
| DWC_CTLL_SRC_INC
- | DWC_CTLL_FC_M2P
+ | DWC_CTLL_FC(dws->fc)
| DWC_CTLL_INT_EN);
break;
case DMA_FROM_DEVICE:
desc->lli.dar = buf_addr + (period_len * i);
desc->lli.sar = dws->rx_reg;
- desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_INC
| DWC_CTLL_SRC_FIX
- | DWC_CTLL_FC_P2M
+ | DWC_CTLL_FC(dws->fc)
| DWC_CTLL_INT_EN);
break;
default:
@@ -1307,7 +1324,17 @@ static int __init dw_probe(struct platform_device *pdev)
dwc->chan.device = &dw->dma;
dwc->chan.cookie = dwc->completed = 1;
dwc->chan.chan_id = i;
- list_add_tail(&dwc->chan.device_node, &dw->dma.channels);
+ if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
+ list_add_tail(&dwc->chan.device_node,
+ &dw->dma.channels);
+ else
+ list_add(&dwc->chan.device_node, &dw->dma.channels);
+
+ /* 7 is highest priority & 0 is lowest. */
+ if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
+ dwc->priority = 7 - i;
+ else
+ dwc->priority = i;
dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
spin_lock_init(&dwc->lock);
@@ -1335,6 +1362,8 @@ static int __init dw_probe(struct platform_device *pdev)
dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
+ if (pdata->is_private)
+ dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
dw->dma.dev = &pdev->dev;
dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
dw->dma.device_free_chan_resources = dwc_free_chan_resources;
@@ -1447,7 +1476,7 @@ static int __init dw_init(void)
{
return platform_driver_probe(&dw_driver, dw_probe);
}
-module_init(dw_init);
+subsys_initcall(dw_init);
static void __exit dw_exit(void)
{
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index d9a939f67f4..720f821527f 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -86,6 +86,7 @@ struct dw_dma_regs {
#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
+#define DWC_CTLL_FC(n) ((n) << 20)
#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
@@ -101,6 +102,8 @@ struct dw_dma_regs {
#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
+#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */
+#define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */
#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
@@ -134,6 +137,7 @@ struct dw_dma_chan {
struct dma_chan chan;
void __iomem *ch_regs;
u8 mask;
+ u8 priority;
spinlock_t lock;
@@ -155,9 +159,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
}
#define channel_readl(dwc, name) \
- __raw_readl(&(__dwc_regs(dwc)->name))
+ readl(&(__dwc_regs(dwc)->name))
#define channel_writel(dwc, name, val) \
- __raw_writel((val), &(__dwc_regs(dwc)->name))
+ writel((val), &(__dwc_regs(dwc)->name))
static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
{
@@ -181,9 +185,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
}
#define dma_readl(dw, name) \
- __raw_readl(&(__dw_regs(dw)->name))
+ readl(&(__dw_regs(dw)->name))
#define dma_writel(dw, name, val) \
- __raw_writel((val), &(__dw_regs(dw)->name))
+ writel((val), &(__dw_regs(dw)->name))
#define channel_set_bit(dw, reg, mask) \
dma_writel(dw, reg, ((mask) << 8) | (mask))
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 4de947a450f..8670a501212 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,35 +37,16 @@
#include "fsldma.h"
-static const char msg_ld_oom[] = "No free memory for link descriptor\n";
+#define chan_dbg(chan, fmt, arg...) \
+ dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
+#define chan_err(chan, fmt, arg...) \
+ dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
-static void dma_init(struct fsldma_chan *chan)
-{
- /* Reset the channel */
- DMA_OUT(chan, &chan->regs->mr, 0, 32);
+static const char msg_ld_oom[] = "No free memory for link descriptor";
- switch (chan->feature & FSL_DMA_IP_MASK) {
- case FSL_DMA_IP_85XX:
- /* Set the channel to below modes:
- * EIE - Error interrupt enable
- * EOSIE - End of segments interrupt enable (basic mode)
- * EOLNIE - End of links interrupt enable
- * BWC - Bandwidth sharing among channels
- */
- DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
- | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
- | FSL_DMA_MR_EOSIE, 32);
- break;
- case FSL_DMA_IP_83XX:
- /* Set the channel to below modes:
- * EOTIE - End-of-transfer interrupt enable
- * PRC_RM - PCI read multiple
- */
- DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
- | FSL_DMA_MR_PRC_RM, 32);
- break;
- }
-}
+/*
+ * Register Helpers
+ */
static void set_sr(struct fsldma_chan *chan, u32 val)
{
@@ -77,14 +58,38 @@ static u32 get_sr(struct fsldma_chan *chan)
return DMA_IN(chan, &chan->regs->sr, 32);
}
+static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
+{
+ DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
+}
+
+static dma_addr_t get_cdar(struct fsldma_chan *chan)
+{
+ return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
+}
+
+static u32 get_bcr(struct fsldma_chan *chan)
+{
+ return DMA_IN(chan, &chan->regs->bcr, 32);
+}
+
+/*
+ * Descriptor Helpers
+ */
+
static void set_desc_cnt(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, u32 count)
{
hw->count = CPU_TO_DMA(chan, count, 32);
}
+static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
+{
+ return DMA_TO_CPU(chan, desc->hw.count, 32);
+}
+
static void set_desc_src(struct fsldma_chan *chan,
- struct fsl_dma_ld_hw *hw, dma_addr_t src)
+ struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
u64 snoop_bits;
@@ -93,8 +98,18 @@ static void set_desc_src(struct fsldma_chan *chan,
hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}
+static dma_addr_t get_desc_src(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ u64 snoop_bits;
+
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+ ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
+ return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
+}
+
static void set_desc_dst(struct fsldma_chan *chan,
- struct fsl_dma_ld_hw *hw, dma_addr_t dst)
+ struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
u64 snoop_bits;
@@ -103,8 +118,18 @@ static void set_desc_dst(struct fsldma_chan *chan,
hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}
+static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ u64 snoop_bits;
+
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+ ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
+ return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
+}
+
static void set_desc_next(struct fsldma_chan *chan,
- struct fsl_dma_ld_hw *hw, dma_addr_t next)
+ struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
u64 snoop_bits;
@@ -113,24 +138,46 @@ static void set_desc_next(struct fsldma_chan *chan,
hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}
-static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
+static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
- DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
-}
+ u64 snoop_bits;
-static dma_addr_t get_cdar(struct fsldma_chan *chan)
-{
- return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
-}
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+ ? FSL_DMA_SNEN : 0;
-static dma_addr_t get_ndar(struct fsldma_chan *chan)
-{
- return DMA_IN(chan, &chan->regs->ndar, 64);
+ desc->hw.next_ln_addr = CPU_TO_DMA(chan,
+ DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+ | snoop_bits, 64);
}
-static u32 get_bcr(struct fsldma_chan *chan)
+/*
+ * DMA Engine Hardware Control Helpers
+ */
+
+static void dma_init(struct fsldma_chan *chan)
{
- return DMA_IN(chan, &chan->regs->bcr, 32);
+ /* Reset the channel */
+ DMA_OUT(chan, &chan->regs->mr, 0, 32);
+
+ switch (chan->feature & FSL_DMA_IP_MASK) {
+ case FSL_DMA_IP_85XX:
+ /* Set the channel to the modes below:
+ * EIE - Error interrupt enable
+ * EOLNIE - End of links interrupt enable
+ * BWC - Bandwidth sharing among channels
+ */
+ DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
+ | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
+ break;
+ case FSL_DMA_IP_83XX:
+ /* Set the channel to the modes below:
+ * EOTIE - End-of-transfer interrupt enable
+ * PRC_RM - PCI read multiple
+ */
+ DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
+ | FSL_DMA_MR_PRC_RM, 32);
+ break;
+ }
}
static int dma_is_idle(struct fsldma_chan *chan)
@@ -139,25 +186,32 @@ static int dma_is_idle(struct fsldma_chan *chan)
return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}
+/*
+ * Start the DMA controller
+ *
+ * Preconditions:
+ * - the CDAR register must point to the start descriptor
+ * - the MRn[CS] bit must be cleared
+ */
static void dma_start(struct fsldma_chan *chan)
{
u32 mode;
mode = DMA_IN(chan, &chan->regs->mr, 32);
- if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
- if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
- DMA_OUT(chan, &chan->regs->bcr, 0, 32);
- mode |= FSL_DMA_MR_EMP_EN;
- } else {
- mode &= ~FSL_DMA_MR_EMP_EN;
- }
+ if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+ DMA_OUT(chan, &chan->regs->bcr, 0, 32);
+ mode |= FSL_DMA_MR_EMP_EN;
+ } else {
+ mode &= ~FSL_DMA_MR_EMP_EN;
}
- if (chan->feature & FSL_DMA_CHAN_START_EXT)
+ if (chan->feature & FSL_DMA_CHAN_START_EXT) {
mode |= FSL_DMA_MR_EMS_EN;
- else
+ } else {
+ mode &= ~FSL_DMA_MR_EMS_EN;
mode |= FSL_DMA_MR_CS;
+ }
DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
@@ -167,13 +221,26 @@ static void dma_halt(struct fsldma_chan *chan)
u32 mode;
int i;
+ /* read the mode register */
mode = DMA_IN(chan, &chan->regs->mr, 32);
- mode |= FSL_DMA_MR_CA;
- DMA_OUT(chan, &chan->regs->mr, mode, 32);
- mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
+ /*
+ * The 85xx controller supports channel abort, which will stop
+ * the current transfer. On 83xx, this bit is the transfer error
+ * mask bit, which should not be changed.
+ */
+ if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+ mode |= FSL_DMA_MR_CA;
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
+
+ mode &= ~FSL_DMA_MR_CA;
+ }
+
+ /* stop the DMA controller */
+ mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ /* wait for the DMA controller to become idle */
for (i = 0; i < 100; i++) {
if (dma_is_idle(chan))
return;
@@ -182,20 +249,7 @@ static void dma_halt(struct fsldma_chan *chan)
}
if (!dma_is_idle(chan))
- dev_err(chan->dev, "DMA halt timeout!\n");
-}
-
-static void set_ld_eol(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
-{
- u64 snoop_bits;
-
- snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
- ? FSL_DMA_SNEN : 0;
-
- desc->hw.next_ln_addr = CPU_TO_DMA(chan,
- DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
- | snoop_bits, 64);
+ chan_err(chan, "DMA halt timeout!\n");
}
/**
@@ -321,8 +375,7 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
-static void append_ld_queue(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
+static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
@@ -363,8 +416,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = chan->common.cookie;
list_for_each_entry(child, &desc->tx_list, node) {
cookie++;
- if (cookie < 0)
- cookie = 1;
+ if (cookie < DMA_MIN_COOKIE)
+ cookie = DMA_MIN_COOKIE;
child->async_tx.cookie = cookie;
}
@@ -385,15 +438,14 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
*
* Return - The descriptor allocated. NULL for failed.
*/
-static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
- struct fsldma_chan *chan)
+static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
struct fsl_desc_sw *desc;
dma_addr_t pdesc;
desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
if (!desc) {
- dev_dbg(chan->dev, "out of memory for link desc\n");
+ chan_dbg(chan, "out of memory for link descriptor\n");
return NULL;
}
@@ -403,10 +455,13 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
desc->async_tx.tx_submit = fsl_dma_tx_submit;
desc->async_tx.phys = pdesc;
+#ifdef FSL_DMA_LD_DEBUG
+ chan_dbg(chan, "LD %p allocated\n", desc);
+#endif
+
return desc;
}
-
/**
* fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
* @chan : Freescale DMA channel
@@ -427,13 +482,11 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
 * We need the descriptor to be aligned to 32 bytes
 * to meet the FSL DMA specification requirement.
*/
- chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
- chan->dev,
+ chan->desc_pool = dma_pool_create(chan->name, chan->dev,
sizeof(struct fsl_desc_sw),
__alignof__(struct fsl_desc_sw), 0);
if (!chan->desc_pool) {
- dev_err(chan->dev, "unable to allocate channel %d "
- "descriptor pool\n", chan->id);
+ chan_err(chan, "unable to allocate descriptor pool\n");
return -ENOMEM;
}
@@ -455,6 +508,9 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan,
list_for_each_entry_safe(desc, _desc, list, node) {
list_del(&desc->node);
+#ifdef FSL_DMA_LD_DEBUG
+ chan_dbg(chan, "LD %p free\n", desc);
+#endif
dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}
}
@@ -466,6 +522,9 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
list_for_each_entry_safe_reverse(desc, _desc, list, node) {
list_del(&desc->node);
+#ifdef FSL_DMA_LD_DEBUG
+ chan_dbg(chan, "LD %p free\n", desc);
+#endif
dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}
}
@@ -479,7 +538,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
struct fsldma_chan *chan = to_fsl_chan(dchan);
unsigned long flags;
- dev_dbg(chan->dev, "Free all channel resources.\n");
+ chan_dbg(chan, "free all channel resources\n");
spin_lock_irqsave(&chan->desc_lock, flags);
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
@@ -502,7 +561,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(chan->dev, msg_ld_oom);
+ chan_err(chan, "%s\n", msg_ld_oom);
return NULL;
}
@@ -512,14 +571,15 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
/* Insert the link descriptor to the LD ring */
list_add_tail(&new->node, &new->tx_list);
- /* Set End-of-link to the last link descriptor of new list*/
+ /* Set End-of-link to the last link descriptor of new list */
set_ld_eol(chan, new);
return &new->async_tx;
}
-static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
- struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
+static struct dma_async_tx_descriptor *
+fsl_dma_prep_memcpy(struct dma_chan *dchan,
+ dma_addr_t dma_dst, dma_addr_t dma_src,
size_t len, unsigned long flags)
{
struct fsldma_chan *chan;
@@ -539,12 +599,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
/* Allocate the link descriptor from DMA pool */
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(chan->dev, msg_ld_oom);
+ chan_err(chan, "%s\n", msg_ld_oom);
goto fail;
}
-#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(chan->dev, "new link desc alloc %p\n", new);
-#endif
copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
@@ -572,7 +629,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
new->async_tx.flags = flags; /* client is in control of this ack */
new->async_tx.cookie = -EBUSY;
- /* Set End-of-link to the last link descriptor of new list*/
+ /* Set End-of-link to the last link descriptor of new list */
set_ld_eol(chan, new);
return &first->async_tx;
@@ -627,12 +684,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
/* allocate and populate the descriptor */
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(chan->dev, msg_ld_oom);
+ chan_err(chan, "%s\n", msg_ld_oom);
goto fail;
}
-#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(chan->dev, "new link desc alloc %p\n", new);
-#endif
set_desc_cnt(chan, &new->hw, len);
set_desc_src(chan, &new->hw, src);
@@ -744,14 +798,15 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
switch (cmd) {
case DMA_TERMINATE_ALL:
+ spin_lock_irqsave(&chan->desc_lock, flags);
+
/* Halt the DMA engine */
dma_halt(chan);
- spin_lock_irqsave(&chan->desc_lock, flags);
-
/* Remove and free all of the descriptors in the LD queue */
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
+ chan->idle = true;
spin_unlock_irqrestore(&chan->desc_lock, flags);
return 0;
@@ -789,140 +844,87 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
}
/**
- * fsl_dma_update_completed_cookie - Update the completed cookie.
- * @chan : Freescale DMA channel
- *
- * CONTEXT: hardirq
- */
-static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
-{
- struct fsl_desc_sw *desc;
- unsigned long flags;
- dma_cookie_t cookie;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
-
- if (list_empty(&chan->ld_running)) {
- dev_dbg(chan->dev, "no running descriptors\n");
- goto out_unlock;
- }
-
- /* Get the last descriptor, update the cookie to that */
- desc = to_fsl_desc(chan->ld_running.prev);
- if (dma_is_idle(chan))
- cookie = desc->async_tx.cookie;
- else {
- cookie = desc->async_tx.cookie - 1;
- if (unlikely(cookie < DMA_MIN_COOKIE))
- cookie = DMA_MAX_COOKIE;
- }
-
- chan->completed_cookie = cookie;
-
-out_unlock:
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-}
-
-/**
- * fsldma_desc_status - Check the status of a descriptor
+ * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
* @chan: Freescale DMA channel
- * @desc: DMA SW descriptor
- *
- * This function will return the status of the given descriptor
- */
-static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
-{
- return dma_async_is_complete(desc->async_tx.cookie,
- chan->completed_cookie,
- chan->common.cookie);
-}
-
-/**
- * fsl_chan_ld_cleanup - Clean up link descriptors
- * @chan : Freescale DMA channel
+ * @desc: descriptor to cleanup and free
*
- * This function clean up the ld_queue of DMA channel.
+ * This function is used on a descriptor which has been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies, and then
+ * free the descriptor.
*/
-static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
+static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
{
- struct fsl_desc_sw *desc, *_desc;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
-
- dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
- list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
- dma_async_tx_callback callback;
- void *callback_param;
-
- if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
- break;
+ struct dma_async_tx_descriptor *txd = &desc->async_tx;
+ struct device *dev = chan->common.device->dev;
+ dma_addr_t src = get_desc_src(chan, desc);
+ dma_addr_t dst = get_desc_dst(chan, desc);
+ u32 len = get_desc_cnt(chan, desc);
+
+ /* Run the link descriptor callback function */
+ if (txd->callback) {
+#ifdef FSL_DMA_LD_DEBUG
+ chan_dbg(chan, "LD %p callback\n", desc);
+#endif
+ txd->callback(txd->callback_param);
+ }
- /* Remove from the list of running transactions */
- list_del(&desc->node);
+ /* Run any dependencies */
+ dma_run_dependencies(txd);
- /* Run the link descriptor callback function */
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
- if (callback) {
- spin_unlock_irqrestore(&chan->desc_lock, flags);
- dev_dbg(chan->dev, "LD %p callback\n", desc);
- callback(callback_param);
- spin_lock_irqsave(&chan->desc_lock, flags);
- }
+ /* Unmap the dst buffer, if requested */
+ if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
+ }
- /* Run any dependencies, then free the descriptor */
- dma_run_dependencies(&desc->async_tx);
- dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+ /* Unmap the src buffer, if requested */
+ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
}
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+#ifdef FSL_DMA_LD_DEBUG
+ chan_dbg(chan, "LD %p free\n", desc);
+#endif
+ dma_pool_free(chan->desc_pool, desc, txd->phys);
}
/**
* fsl_chan_xfer_ld_queue - transfer any pending transactions
* @chan : Freescale DMA channel
*
- * This will make sure that any pending transactions will be run.
- * If the DMA controller is idle, it will be started. Otherwise,
- * the DMA controller's interrupt handler will start any pending
- * transactions when it becomes idle.
+ * HARDWARE STATE: idle
+ * LOCKING: must hold chan->desc_lock
*/
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
struct fsl_desc_sw *desc;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
/*
* If the list of pending descriptors is empty, then we
* don't need to do any work at all
*/
if (list_empty(&chan->ld_pending)) {
- dev_dbg(chan->dev, "no pending LDs\n");
- goto out_unlock;
+ chan_dbg(chan, "no pending LDs\n");
+ return;
}
/*
- * The DMA controller is not idle, which means the interrupt
- * handler will start any queued transactions when it runs
- * at the end of the current transaction
+ * The DMA controller is not idle, which means that the interrupt
+ * handler will start any queued transactions when it runs after
+ * this transaction finishes
*/
- if (!dma_is_idle(chan)) {
- dev_dbg(chan->dev, "DMA controller still busy\n");
- goto out_unlock;
+ if (!chan->idle) {
+ chan_dbg(chan, "DMA controller still busy\n");
+ return;
}
/*
- * TODO:
- * make sure the dma_halt() function really un-wedges the
- * controller as much as possible
- */
- dma_halt(chan);
-
- /*
* If there are some link descriptors which have not been
* transferred, we need to start the controller
*/
@@ -931,18 +933,32 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
* Move all elements from the queue of pending transactions
* onto the list of running transactions
*/
+ chan_dbg(chan, "idle, starting controller\n");
desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
/*
+ * The 85xx DMA controller doesn't clear the channel start bit
+ * automatically at the end of a transfer. Therefore we must clear
+ * it in software before starting the transfer.
+ */
+ if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+ u32 mode;
+
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode &= ~FSL_DMA_MR_CS;
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ }
+
+ /*
* Program the descriptor's address into the DMA controller,
* then start the DMA transaction
*/
set_cdar(chan, desc->async_tx.phys);
- dma_start(chan);
+ get_cdar(chan);
-out_unlock:
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ dma_start(chan);
+ chan->idle = false;
}
/**
@@ -952,7 +968,11 @@ out_unlock:
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
fsl_chan_xfer_ld_queue(chan);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
}
/**
@@ -964,16 +984,18 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
struct dma_tx_state *txstate)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
- dma_cookie_t last_used;
dma_cookie_t last_complete;
+ dma_cookie_t last_used;
+ unsigned long flags;
- fsl_chan_ld_cleanup(chan);
+ spin_lock_irqsave(&chan->desc_lock, flags);
- last_used = dchan->cookie;
last_complete = chan->completed_cookie;
+ last_used = dchan->cookie;
- dma_set_tx_state(txstate, last_complete, last_used, 0);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -984,21 +1006,20 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
struct fsldma_chan *chan = data;
- int update_cookie = 0;
- int xfer_ld_q = 0;
u32 stat;
/* save and clear the status register */
stat = get_sr(chan);
set_sr(chan, stat);
- dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);
+ chan_dbg(chan, "irq: stat = 0x%x\n", stat);
+ /* check that this was really our device */
stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
if (!stat)
return IRQ_NONE;
if (stat & FSL_DMA_SR_TE)
- dev_err(chan->dev, "Transfer Error!\n");
+ chan_err(chan, "Transfer Error!\n");
/*
* Programming Error
@@ -1006,29 +1027,10 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 * trigger a PE interrupt.
*/
if (stat & FSL_DMA_SR_PE) {
- dev_dbg(chan->dev, "irq: Programming Error INT\n");
- if (get_bcr(chan) == 0) {
- /* BCR register is 0, this is a DMA_INTERRUPT async_tx.
- * Now, update the completed cookie, and continue the
- * next uncompleted transfer.
- */
- update_cookie = 1;
- xfer_ld_q = 1;
- }
+ chan_dbg(chan, "irq: Programming Error INT\n");
stat &= ~FSL_DMA_SR_PE;
- }
-
- /*
- * If the link descriptor segment transfer finishes,
- * we will recycle the used descriptor.
- */
- if (stat & FSL_DMA_SR_EOSI) {
- dev_dbg(chan->dev, "irq: End-of-segments INT\n");
- dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
- (unsigned long long)get_cdar(chan),
- (unsigned long long)get_ndar(chan));
- stat &= ~FSL_DMA_SR_EOSI;
- update_cookie = 1;
+ if (get_bcr(chan) != 0)
+ chan_err(chan, "Programming Error!\n");
}
/*
@@ -1036,10 +1038,8 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
* and start the next transfer if it exist.
*/
if (stat & FSL_DMA_SR_EOCDI) {
- dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
+ chan_dbg(chan, "irq: End-of-Chain link INT\n");
stat &= ~FSL_DMA_SR_EOCDI;
- update_cookie = 1;
- xfer_ld_q = 1;
}
/*
@@ -1048,27 +1048,79 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
* prepare next transfer.
*/
if (stat & FSL_DMA_SR_EOLNI) {
- dev_dbg(chan->dev, "irq: End-of-link INT\n");
+ chan_dbg(chan, "irq: End-of-link INT\n");
stat &= ~FSL_DMA_SR_EOLNI;
- xfer_ld_q = 1;
}
- if (update_cookie)
- fsl_dma_update_completed_cookie(chan);
- if (xfer_ld_q)
- fsl_chan_xfer_ld_queue(chan);
+ /* check that the DMA controller is really idle */
+ if (!dma_is_idle(chan))
+ chan_err(chan, "irq: controller not idle!\n");
+
+ /* check that we handled all of the bits */
if (stat)
- dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);
+ chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);
- dev_dbg(chan->dev, "irq: Exit\n");
+ /*
+ * Schedule the tasklet to handle all cleanup of the current
+ * transaction. It will start a new transaction if there is
+ * one pending.
+ */
tasklet_schedule(&chan->tasklet);
+ chan_dbg(chan, "irq: Exit\n");
return IRQ_HANDLED;
}
static void dma_do_tasklet(unsigned long data)
{
struct fsldma_chan *chan = (struct fsldma_chan *)data;
- fsl_chan_ld_cleanup(chan);
+ struct fsl_desc_sw *desc, *_desc;
+ LIST_HEAD(ld_cleanup);
+ unsigned long flags;
+
+ chan_dbg(chan, "tasklet entry\n");
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+
+ /* update the cookie if we have some descriptors to cleanup */
+ if (!list_empty(&chan->ld_running)) {
+ dma_cookie_t cookie;
+
+ desc = to_fsl_desc(chan->ld_running.prev);
+ cookie = desc->async_tx.cookie;
+
+ chan->completed_cookie = cookie;
+ chan_dbg(chan, "completed_cookie=%d\n", cookie);
+ }
+
+ /*
+ * move the descriptors to a temporary list so we can drop the lock
+ * during the entire cleanup operation
+ */
+ list_splice_tail_init(&chan->ld_running, &ld_cleanup);
+
+ /* the hardware is now idle and ready for more */
+ chan->idle = true;
+
+ /*
+ * Start any pending transactions automatically
+ *
+ * In the ideal case, we keep the DMA controller busy while we go
+ * ahead and free the descriptors below.
+ */
+ fsl_chan_xfer_ld_queue(chan);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
+
+ /* Remove from the list of transactions */
+ list_del(&desc->node);
+
+ /* Run all cleanup for this descriptor */
+ fsldma_cleanup_descriptor(chan, desc);
+ }
+
+ chan_dbg(chan, "tasklet exit\n");
}
static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
@@ -1116,7 +1168,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev)
for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
chan = fdev->chan[i];
if (chan && chan->irq != NO_IRQ) {
- dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
+ chan_dbg(chan, "free per-channel IRQ\n");
free_irq(chan->irq, chan);
}
}
@@ -1143,19 +1195,16 @@ static int fsldma_request_irqs(struct fsldma_device *fdev)
continue;
if (chan->irq == NO_IRQ) {
- dev_err(fdev->dev, "no interrupts property defined for "
- "DMA channel %d. Please fix your "
- "device tree\n", chan->id);
+ chan_err(chan, "interrupts property missing in device tree\n");
ret = -ENODEV;
goto out_unwind;
}
- dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
+ chan_dbg(chan, "request per-channel IRQ\n");
ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
"fsldma-chan", chan);
if (ret) {
- dev_err(fdev->dev, "unable to request IRQ for DMA "
- "channel %d\n", chan->id);
+ chan_err(chan, "unable to request per-channel IRQ\n");
goto out_unwind;
}
}
@@ -1230,6 +1279,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
fdev->chan[chan->id] = chan;
tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+ snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
/* Initialize the channel */
dma_init(chan);
@@ -1250,6 +1300,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
spin_lock_init(&chan->desc_lock);
INIT_LIST_HEAD(&chan->ld_pending);
INIT_LIST_HEAD(&chan->ld_running);
+ chan->idle = true;
chan->common.device = &fdev->common;
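
Illustrative aside (not part of the patch): the fsldma rework above changes how descriptors move between ld_pending and ld_running, but the dmaengine contract seen by clients is unchanged. A minimal sketch of a generic memcpy client for such a channel, assuming only the stock dmaengine API of this kernel generation; the function name is invented and error handling is abbreviated.

#include <linux/dmaengine.h>

static int example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* build the descriptor; fsldma hands back &first->async_tx */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	/* queues the descriptor on ld_pending via fsl_dma_tx_submit() */
	cookie = tx->tx_submit(tx);

	/* kicks fsl_chan_xfer_ld_queue() if the controller is idle */
	dma_async_issue_pending(chan);

	/* poll for completion; a real client would use a callback */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) !=
	       DMA_SUCCESS)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}
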
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index ba9f403c0fb..9cb5aa57c67 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -102,8 +102,8 @@ struct fsl_desc_sw {
} __attribute__((aligned(32)));
struct fsldma_chan_regs {
- u32 mr; /* 0x00 - Mode Register */
- u32 sr; /* 0x04 - Status Register */
+ u32 mr; /* 0x00 - Mode Register */
+ u32 sr; /* 0x04 - Status Register */
u64 cdar; /* 0x08 - Current descriptor address register */
u64 sar; /* 0x10 - Source Address Register */
u64 dar; /* 0x18 - Destination Address Register */
@@ -135,6 +135,7 @@ struct fsldma_device {
#define FSL_DMA_CHAN_START_EXT 0x00002000
struct fsldma_chan {
+ char name[8]; /* Channel name */
struct fsldma_chan_regs __iomem *regs;
dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */
@@ -147,6 +148,7 @@ struct fsldma_chan {
int id; /* Raw id of this channel */
struct tasklet_struct tasklet;
u32 feature;
+ bool idle; /* DMA controller is idle */
void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
new file mode 100644
index 00000000000..88aad4f5400
--- /dev/null
+++ b/drivers/dma/mxs-dma.c
@@ -0,0 +1,724 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Refer to drivers/dma/imx-sdma.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+
+#include <asm/irq.h>
+#include <mach/mxs.h>
+#include <mach/dma.h>
+#include <mach/common.h>
+
+/*
+ * NOTE: The term "PIO" throughout the mxs-dma implementation means
+ * the PIO mode of mxs apbh-dma and apbx-dma. In this mode, the DMA
+ * controller can program the registers of peripheral devices.
+ */
+
+#define MXS_DMA_APBH 0
+#define MXS_DMA_APBX 1
+#define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH)
+
+#define APBH_VERSION_LATEST 3
+#define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST)
+
+#define HW_APBHX_CTRL0 0x000
+#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
+#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
+#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8
+#define BP_APBH_CTRL0_RESET_CHANNEL 16
+#define HW_APBHX_CTRL1 0x010
+#define HW_APBHX_CTRL2 0x020
+#define HW_APBHX_CHANNEL_CTRL 0x030
+#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16
+#define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800)
+#define HW_APBX_VERSION 0x800
+#define BP_APBHX_VERSION_MAJOR 24
+#define HW_APBHX_CHn_NXTCMDAR(n) \
+ (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70)
+#define HW_APBHX_CHn_SEMA(n) \
+ (((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70)
+
+/*
+ * ccw bits definitions
+ *
+ * COMMAND: 0..1 (2)
+ * CHAIN: 2 (1)
+ * IRQ: 3 (1)
+ * NAND_LOCK: 4 (1) - not implemented
+ * NAND_WAIT4READY: 5 (1) - not implemented
+ * DEC_SEM: 6 (1)
+ * WAIT4END: 7 (1)
+ * HALT_ON_TERMINATE: 8 (1)
+ * TERMINATE_FLUSH: 9 (1)
+ * RESERVED: 10..11 (2)
+ * PIO_NUM: 12..15 (4)
+ */
+#define BP_CCW_COMMAND 0
+#define BM_CCW_COMMAND (3 << 0)
+#define CCW_CHAIN (1 << 2)
+#define CCW_IRQ (1 << 3)
+#define CCW_DEC_SEM (1 << 6)
+#define CCW_WAIT4END (1 << 7)
+#define CCW_HALT_ON_TERM (1 << 8)
+#define CCW_TERM_FLUSH (1 << 9)
+#define BP_CCW_PIO_NUM 12
+#define BM_CCW_PIO_NUM (0xf << 12)
+
+#define BF_CCW(value, field) (((value) << BP_CCW_##field) & BM_CCW_##field)
+
+#define MXS_DMA_CMD_NO_XFER 0
+#define MXS_DMA_CMD_WRITE 1
+#define MXS_DMA_CMD_READ 2
+#define MXS_DMA_CMD_DMA_SENSE 3 /* not implemented */
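
For illustration only (not part of the patch), this is the bit pattern that mxs_dma_prep_slave_sg() further down builds for a 6-word PIO command; BF_CCW() simply shifts a value into its field and masks it.

/* Illustrative sketch: the ccw bits for a 6-word PIO command */
static u16 example_pio_ccw_bits(void)
{
	u16 bits = 0;

	bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);	/* (0 << 0) & 0x3     */
	bits |= BF_CCW(6, PIO_NUM);			/* (6 << 12) & 0xf000 */
	bits |= CCW_IRQ | CCW_DEC_SEM | CCW_WAIT4END;	/* 0x008|0x040|0x080  */
	bits |= CCW_HALT_ON_TERM | CCW_TERM_FLUSH;	/* 0x100|0x200        */

	return bits;	/* == 0x63c8 */
}
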
+
+struct mxs_dma_ccw {
+ u32 next;
+ u16 bits;
+ u16 xfer_bytes;
+#define MAX_XFER_BYTES 0xff00
+ u32 bufaddr;
+#define MXS_PIO_WORDS 16
+ u32 pio_words[MXS_PIO_WORDS];
+};
+
+#define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw))
+
+struct mxs_dma_chan {
+ struct mxs_dma_engine *mxs_dma;
+ struct dma_chan chan;
+ struct dma_async_tx_descriptor desc;
+ struct tasklet_struct tasklet;
+ int chan_irq;
+ struct mxs_dma_ccw *ccw;
+ dma_addr_t ccw_phys;
+ dma_cookie_t last_completed;
+ enum dma_status status;
+ unsigned int flags;
+#define MXS_DMA_SG_LOOP (1 << 0)
+};
+
+#define MXS_DMA_CHANNELS 16
+#define MXS_DMA_CHANNELS_MASK 0xffff
+
+struct mxs_dma_engine {
+ int dev_id;
+ unsigned int version;
+ void __iomem *base;
+ struct clk *clk;
+ struct dma_device dma_device;
+ struct device_dma_parameters dma_parms;
+ struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
+};
+
+static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
+{
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int chan_id = mxs_chan->chan.chan_id;
+
+ if (dma_is_apbh() && apbh_is_old())
+ writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ else
+ writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+}
+
+static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
+{
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int chan_id = mxs_chan->chan.chan_id;
+
+ /* set cmd_addr up */
+ writel(mxs_chan->ccw_phys,
+ mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
+
+ /* enable apbh channel clock */
+ if (dma_is_apbh()) {
+ if (apbh_is_old())
+ writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
+ else
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
+ }
+
+ /* write 1 to SEMA to kick off the channel */
+ writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
+}
+
+static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
+{
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int chan_id = mxs_chan->chan.chan_id;
+
+ /* disable apbh channel clock */
+ if (dma_is_apbh()) {
+ if (apbh_is_old())
+ writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ else
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ }
+
+ mxs_chan->status = DMA_SUCCESS;
+}
+
+static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
+{
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int chan_id = mxs_chan->chan.chan_id;
+
+ /* freeze the channel */
+ if (dma_is_apbh() && apbh_is_old())
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ else
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+
+ mxs_chan->status = DMA_PAUSED;
+}
+
+static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
+{
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int chan_id = mxs_chan->chan.chan_id;
+
+ /* unfreeze the channel */
+ if (dma_is_apbh() && apbh_is_old())
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
+ else
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR);
+
+ mxs_chan->status = DMA_IN_PROGRESS;
+}
+
+static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan)
+{
+ dma_cookie_t cookie = mxs_chan->chan.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+
+ mxs_chan->chan.cookie = cookie;
+ mxs_chan->desc.cookie = cookie;
+
+ return cookie;
+}
+
+static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct mxs_dma_chan, chan);
+}
+
+static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan);
+
+ mxs_dma_enable_chan(mxs_chan);
+
+ return mxs_dma_assign_cookie(mxs_chan);
+}
+
+static void mxs_dma_tasklet(unsigned long data)
+{
+ struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
+
+ if (mxs_chan->desc.callback)
+ mxs_chan->desc.callback(mxs_chan->desc.callback_param);
+}
+
+static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
+{
+ struct mxs_dma_engine *mxs_dma = dev_id;
+ u32 stat1, stat2;
+
+ /* completion status */
+ stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
+ stat1 &= MXS_DMA_CHANNELS_MASK;
+ writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR);
+
+ /* error status */
+ stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
+ writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR);
+
+ /*
+ * When both the completion and the error-of-termination bits are set
+ * at the same time, we do not treat it as an error. IOW, it only
+ * becomes an error we need to handle here if it is either (1) a bus
+ * error or (2) a termination error with no completion.
+ */
+ stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
+ (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */
+
+ /* combine error and completion status for checking */
+ stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
+ while (stat1) {
+ int channel = fls(stat1) - 1;
+ struct mxs_dma_chan *mxs_chan =
+ &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];
+
+ if (channel >= MXS_DMA_CHANNELS) {
+ dev_dbg(mxs_dma->dma_device.dev,
+ "%s: error in channel %d\n", __func__,
+ channel - MXS_DMA_CHANNELS);
+ mxs_chan->status = DMA_ERROR;
+ mxs_dma_reset_chan(mxs_chan);
+ } else {
+ if (mxs_chan->flags & MXS_DMA_SG_LOOP)
+ mxs_chan->status = DMA_IN_PROGRESS;
+ else
+ mxs_chan->status = DMA_SUCCESS;
+ }
+
+ stat1 &= ~(1 << channel);
+
+ if (mxs_chan->status == DMA_SUCCESS)
+ mxs_chan->last_completed = mxs_chan->desc.cookie;
+
+ /* schedule tasklet on this channel */
+ tasklet_schedule(&mxs_chan->tasklet);
+ }
+
+ return IRQ_HANDLED;
+}
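
The stat1/stat2 folding in mxs_dma_int_handler() is terse; restated per channel it flags an error when the termination bit is set and either the upper-half bit (the case the comment calls a bus error) is set or there is no completion. A purely illustrative rearrangement of the same expression, with a hypothetical helper name:

/* Illustrative restatement of the IRQ status folding, not in the patch */
static bool example_chan_has_error(u32 stat1, u32 stat2, int n)
{
	bool completion  = stat1 & (1u << n);
	bool termination = stat2 & (1u << n);
	bool bus_error   = stat2 & (1u << (n + MXS_DMA_CHANNELS));

	/* (bus_error && termination) || (termination && !completion) */
	return termination && (bus_error || !completion);
}
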
+
+static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_data *data = chan->private;
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int ret;
+
+ if (!data)
+ return -EINVAL;
+
+ mxs_chan->chan_irq = data->chan_irq;
+
+ mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
+ &mxs_chan->ccw_phys, GFP_KERNEL);
+ if (!mxs_chan->ccw) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ memset(mxs_chan->ccw, 0, PAGE_SIZE);
+
+ ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
+ 0, "mxs-dma", mxs_dma);
+ if (ret)
+ goto err_irq;
+
+ ret = clk_enable(mxs_dma->clk);
+ if (ret)
+ goto err_clk;
+
+ mxs_dma_reset_chan(mxs_chan);
+
+ dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
+ mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
+
+ /* the descriptor is ready */
+ async_tx_ack(&mxs_chan->desc);
+
+ return 0;
+
+err_clk:
+ free_irq(mxs_chan->chan_irq, mxs_dma);
+err_irq:
+ dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
+ mxs_chan->ccw, mxs_chan->ccw_phys);
+err_alloc:
+ return ret;
+}
+
+static void mxs_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+
+ mxs_dma_disable_chan(mxs_chan);
+
+ free_irq(mxs_chan->chan_irq, mxs_dma);
+
+ dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
+ mxs_chan->ccw, mxs_chan->ccw_phys);
+
+ clk_disable(mxs_dma->clk);
+}
+
+static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long append)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ struct mxs_dma_ccw *ccw;
+ struct scatterlist *sg;
+ int i, j;
+ u32 *pio;
+ static int idx;
+
+ if (mxs_chan->status == DMA_IN_PROGRESS && !append)
+ return NULL;
+
+ if (sg_len + (append ? idx : 0) > NUM_CCW) {
+ dev_err(mxs_dma->dma_device.dev,
+ "maximum number of sg exceeded: %d > %d\n",
+ sg_len, NUM_CCW);
+ goto err_out;
+ }
+
+ mxs_chan->status = DMA_IN_PROGRESS;
+ mxs_chan->flags = 0;
+
+ /*
+ * If the sg is prepared with append flag set, the sg
+ * will be appended to the last prepared sg.
+ */
+ if (append) {
+ BUG_ON(idx < 1);
+ ccw = &mxs_chan->ccw[idx - 1];
+ ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
+ ccw->bits |= CCW_CHAIN;
+ ccw->bits &= ~CCW_IRQ;
+ ccw->bits &= ~CCW_DEC_SEM;
+ ccw->bits &= ~CCW_WAIT4END;
+ } else {
+ idx = 0;
+ }
+
+ if (direction == DMA_NONE) {
+ ccw = &mxs_chan->ccw[idx++];
+ pio = (u32 *) sgl;
+
+ for (j = 0; j < sg_len;)
+ ccw->pio_words[j++] = *pio++;
+
+ ccw->bits = 0;
+ ccw->bits |= CCW_IRQ;
+ ccw->bits |= CCW_DEC_SEM;
+ ccw->bits |= CCW_WAIT4END;
+ ccw->bits |= CCW_HALT_ON_TERM;
+ ccw->bits |= CCW_TERM_FLUSH;
+ ccw->bits |= BF_CCW(sg_len, PIO_NUM);
+ ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
+ } else {
+ for_each_sg(sgl, sg, sg_len, i) {
+ if (sg->length > MAX_XFER_BYTES) {
+ dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
+ sg->length, MAX_XFER_BYTES);
+ goto err_out;
+ }
+
+ ccw = &mxs_chan->ccw[idx++];
+
+ ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
+ ccw->bufaddr = sg->dma_address;
+ ccw->xfer_bytes = sg->length;
+
+ ccw->bits = 0;
+ ccw->bits |= CCW_CHAIN;
+ ccw->bits |= CCW_HALT_ON_TERM;
+ ccw->bits |= CCW_TERM_FLUSH;
+ ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+ MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
+ COMMAND);
+
+ if (i + 1 == sg_len) {
+ ccw->bits &= ~CCW_CHAIN;
+ ccw->bits |= CCW_IRQ;
+ ccw->bits |= CCW_DEC_SEM;
+ ccw->bits |= CCW_WAIT4END;
+ }
+ }
+ }
+
+ return &mxs_chan->desc;
+
+err_out:
+ mxs_chan->status = DMA_ERROR;
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_data_direction direction)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int num_periods = buf_len / period_len;
+ int i = 0, buf = 0;
+
+ if (mxs_chan->status == DMA_IN_PROGRESS)
+ return NULL;
+
+ mxs_chan->status = DMA_IN_PROGRESS;
+ mxs_chan->flags |= MXS_DMA_SG_LOOP;
+
+ if (num_periods > NUM_CCW) {
+ dev_err(mxs_dma->dma_device.dev,
+ "maximum number of sg exceeded: %d > %d\n",
+ num_periods, NUM_CCW);
+ goto err_out;
+ }
+
+ if (period_len > MAX_XFER_BYTES) {
+ dev_err(mxs_dma->dma_device.dev,
+ "maximum period size exceeded: %d > %d\n",
+ period_len, MAX_XFER_BYTES);
+ goto err_out;
+ }
+
+ while (buf < buf_len) {
+ struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];
+
+ if (i + 1 == num_periods)
+ ccw->next = mxs_chan->ccw_phys;
+ else
+ ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);
+
+ ccw->bufaddr = dma_addr;
+ ccw->xfer_bytes = period_len;
+
+ ccw->bits = 0;
+ ccw->bits |= CCW_CHAIN;
+ ccw->bits |= CCW_IRQ;
+ ccw->bits |= CCW_HALT_ON_TERM;
+ ccw->bits |= CCW_TERM_FLUSH;
+ ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+ MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
+
+ dma_addr += period_len;
+ buf += period_len;
+
+ i++;
+ }
+
+ return &mxs_chan->desc;
+
+err_out:
+ mxs_chan->status = DMA_ERROR;
+ return NULL;
+}
+
+static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ int ret = 0;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ mxs_dma_disable_chan(mxs_chan);
+ break;
+ case DMA_PAUSE:
+ mxs_dma_pause_chan(mxs_chan);
+ break;
+ case DMA_RESUME:
+ mxs_dma_resume_chan(mxs_chan);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ dma_cookie_t last_used;
+
+ last_used = chan->cookie;
+ dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0);
+
+ return mxs_chan->status;
+}
+
+static void mxs_dma_issue_pending(struct dma_chan *chan)
+{
+ /*
+ * Nothing to do. We only have a single descriptor.
+ */
+}
+
+static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
+{
+ int ret;
+
+ ret = clk_enable(mxs_dma->clk);
+ if (ret)
+ goto err_out;
+
+ ret = mxs_reset_block(mxs_dma->base);
+ if (ret)
+ goto err_out;
+
+ /* only major version matters */
+ mxs_dma->version = readl(mxs_dma->base +
+ ((mxs_dma->dev_id == MXS_DMA_APBX) ?
+ HW_APBX_VERSION : HW_APBH_VERSION)) >>
+ BP_APBHX_VERSION_MAJOR;
+
+ /* enable apbh burst */
+ if (dma_is_apbh()) {
+ writel(BM_APBH_CTRL0_APB_BURST_EN,
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ writel(BM_APBH_CTRL0_APB_BURST8_EN,
+ mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ }
+
+ /* enable irq for all the channels */
+ writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
+ mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
+
+ clk_disable(mxs_dma->clk);
+
+ return 0;
+
+err_out:
+ return ret;
+}
+
+static int __init mxs_dma_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(pdev);
+ struct mxs_dma_engine *mxs_dma;
+ struct resource *iores;
+ int ret, i;
+
+ mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL);
+ if (!mxs_dma)
+ return -ENOMEM;
+
+ mxs_dma->dev_id = id_entry->driver_data;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!request_mem_region(iores->start, resource_size(iores),
+ pdev->name)) {
+ ret = -EBUSY;
+ goto err_request_region;
+ }
+
+ mxs_dma->base = ioremap(iores->start, resource_size(iores));
+ if (!mxs_dma->base) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ mxs_dma->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mxs_dma->clk)) {
+ ret = PTR_ERR(mxs_dma->clk);
+ goto err_clk;
+ }
+
+ dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);
+
+ INIT_LIST_HEAD(&mxs_dma->dma_device.channels);
+
+ /* Initialize channel parameters */
+ for (i = 0; i < MXS_DMA_CHANNELS; i++) {
+ struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];
+
+ mxs_chan->mxs_dma = mxs_dma;
+ mxs_chan->chan.device = &mxs_dma->dma_device;
+
+ tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
+ (unsigned long) mxs_chan);
+
+
+ /* Add the channel to mxs_chan list */
+ list_add_tail(&mxs_chan->chan.device_node,
+ &mxs_dma->dma_device.channels);
+ }
+
+ ret = mxs_dma_init(mxs_dma);
+ if (ret)
+ goto err_init;
+
+ mxs_dma->dma_device.dev = &pdev->dev;
+
+ /* mxs_dma gets 65535 bytes maximum sg size */
+ mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
+ dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);
+
+ mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
+ mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
+ mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
+ mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
+ mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
+ mxs_dma->dma_device.device_control = mxs_dma_control;
+ mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;
+
+ ret = dma_async_device_register(&mxs_dma->dma_device);
+ if (ret) {
+ dev_err(mxs_dma->dma_device.dev, "unable to register\n");
+ goto err_init;
+ }
+
+ dev_info(mxs_dma->dma_device.dev, "initialized\n");
+
+ return 0;
+
+err_init:
+ clk_put(mxs_dma->clk);
+err_clk:
+ iounmap(mxs_dma->base);
+err_ioremap:
+ release_mem_region(iores->start, resource_size(iores));
+err_request_region:
+ kfree(mxs_dma);
+ return ret;
+}
+
+static struct platform_device_id mxs_dma_type[] = {
+ {
+ .name = "mxs-dma-apbh",
+ .driver_data = MXS_DMA_APBH,
+ }, {
+ .name = "mxs-dma-apbx",
+ .driver_data = MXS_DMA_APBX,
+ }
+};
+
+static struct platform_driver mxs_dma_driver = {
+ .driver = {
+ .name = "mxs-dma",
+ },
+ .id_table = mxs_dma_type,
+};
+
+static int __init mxs_dma_module_init(void)
+{
+ return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
+}
+subsys_initcall(mxs_dma_module_init);
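
Illustrative peripheral-side sketch (not part of the patch): how a client might push a PIO command through mxs-dma, relying on the DMA_NONE convention implemented in mxs_dma_prep_slave_sg() above. The function names are invented, the filter is oversimplified, and struct mxs_dma_data is assumed to come from <mach/dma.h> as the driver expects.

#include <linux/dmaengine.h>
#include <mach/dma.h>

static bool example_filter(struct dma_chan *chan, void *param)
{
	struct mxs_dma_data *data = param;

	chan->private = data;	/* consumed in mxs_dma_alloc_chan_resources() */
	return true;
}

static int example_send_pio(struct mxs_dma_data *data, u32 *pio, int words)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, example_filter, data);
	if (!chan)
		return -ENODEV;

	/*
	 * DMA_NONE plus a u32 array cast to a scatterlist pointer means
	 * "program these PIO words into the peripheral"; see the
	 * direction == DMA_NONE branch of mxs_dma_prep_slave_sg().
	 */
	desc = chan->device->device_prep_slave_sg(chan,
			(struct scatterlist *)pio, words, DMA_NONE, 0);
	if (!desc) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	/* tx_submit() kicks the channel immediately in this driver */
	desc->tx_submit(desc);

	/* a real client waits for the completion callback before this */
	dma_release_channel(chan);
	return 0;
}
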
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 1c38418ae61..8d8fef1480a 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -82,7 +82,7 @@ struct pch_dma_regs {
u32 dma_sts1;
u32 reserved2;
u32 reserved3;
- struct pch_dma_desc_regs desc[0];
+ struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};
struct pch_dma_desc {
@@ -124,7 +124,7 @@ struct pch_dma {
struct pci_pool *pool;
struct pch_dma_regs regs;
struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
- struct pch_dma_chan channels[0];
+ struct pch_dma_chan channels[MAX_CHAN_NR];
};
#define PCH_DMA_CTL0 0x00
@@ -366,7 +366,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
dma_cookie_t cookie;
- spin_lock_bh(&pd_chan->lock);
+ spin_lock(&pd_chan->lock);
cookie = pdc_assign_cookie(pd_chan, desc);
if (list_empty(&pd_chan->active_list)) {
@@ -376,7 +376,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
list_add_tail(&desc->desc_node, &pd_chan->queue);
}
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock(&pd_chan->lock);
return 0;
}
@@ -386,7 +386,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
struct pch_dma *pd = to_pd(chan->device);
dma_addr_t addr;
- desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr);
+ desc = pci_pool_alloc(pd->pool, flags, &addr);
if (desc) {
memset(desc, 0, sizeof(struct pch_dma_desc));
INIT_LIST_HEAD(&desc->tx_list);
@@ -405,7 +405,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
struct pch_dma_desc *ret = NULL;
int i;
- spin_lock_bh(&pd_chan->lock);
+ spin_lock(&pd_chan->lock);
list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
i++;
if (async_tx_test_ack(&desc->txd)) {
@@ -415,15 +415,15 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
}
dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
}
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock(&pd_chan->lock);
dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
if (!ret) {
ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
if (ret) {
- spin_lock_bh(&pd_chan->lock);
+ spin_lock(&pd_chan->lock);
pd_chan->descs_allocated++;
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock(&pd_chan->lock);
} else {
dev_err(chan2dev(&pd_chan->chan),
"failed to alloc desc\n");
@@ -437,10 +437,10 @@ static void pdc_desc_put(struct pch_dma_chan *pd_chan,
struct pch_dma_desc *desc)
{
if (desc) {
- spin_lock_bh(&pd_chan->lock);
+ spin_lock(&pd_chan->lock);
list_splice_init(&desc->tx_list, &pd_chan->free_list);
list_add(&desc->desc_node, &pd_chan->free_list);
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock(&pd_chan->lock);
}
}
@@ -530,9 +530,9 @@ static void pd_issue_pending(struct dma_chan *chan)
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
if (pdc_is_idle(pd_chan)) {
- spin_lock_bh(&pd_chan->lock);
+ spin_lock(&pd_chan->lock);
pdc_advance_work(pd_chan);
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock(&pd_chan->lock);
}
}
@@ -592,7 +592,6 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
goto err_desc_get;
}
-
if (!first) {
first = desc;
} else {
@@ -641,13 +640,13 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
spin_unlock_bh(&pd_chan->lock);
-
return 0;
}
static void pdc_tasklet(unsigned long data)
{
struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
+ unsigned long flags;
if (!pdc_is_idle(pd_chan)) {
dev_err(chan2dev(&pd_chan->chan),
@@ -655,12 +654,12 @@ static void pdc_tasklet(unsigned long data)
return;
}
- spin_lock_bh(&pd_chan->lock);
+ spin_lock_irqsave(&pd_chan->lock, flags);
if (test_and_clear_bit(0, &pd_chan->err_status))
pdc_handle_error(pd_chan);
else
pdc_advance_work(pd_chan);
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock_irqrestore(&pd_chan->lock, flags);
}
static irqreturn_t pd_irq(int irq, void *devid)
@@ -694,6 +693,7 @@ static irqreturn_t pd_irq(int irq, void *devid)
return ret;
}
+#ifdef CONFIG_PM
static void pch_dma_save_regs(struct pch_dma *pd)
{
struct pch_dma_chan *pd_chan;
@@ -771,6 +771,7 @@ static int pch_dma_resume(struct pci_dev *pdev)
return 0;
}
+#endif
static int __devinit pch_dma_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 7c50f6dfd3f..6abe1ec1f2c 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -657,7 +657,7 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
}
static int __devinit
-pl330_probe(struct amba_device *adev, struct amba_id *id)
+pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
struct dma_pl330_platdata *pdat;
struct dma_pl330_dmac *pdmac;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 6e1d46a65d0..af955de035f 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -68,6 +68,7 @@ enum d40_command {
* @base: Pointer to memory area when the pre_alloc_lli's are not large
* enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
* pre_alloc_lli is used.
+ * @dma_addr: DMA address, if mapped
* @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
* @pre_alloc_lli: Pre allocated area for the most common case of transfers,
* one buffer to one buffer.
@@ -75,6 +76,7 @@ enum d40_command {
struct d40_lli_pool {
void *base;
int size;
+ dma_addr_t dma_addr;
/* Space for dst and src, plus an extra for padding */
u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
@@ -94,7 +96,6 @@ struct d40_lli_pool {
* during a transfer.
* @node: List entry.
* @is_in_client_list: true if the client owns this descriptor.
- * @is_hw_linked: true if this job will automatically be continued for
* the previous one.
*
* This descriptor is used for both logical and physical transfers.
@@ -114,7 +115,7 @@ struct d40_desc {
struct list_head node;
bool is_in_client_list;
- bool is_hw_linked;
+ bool cyclic;
};
/**
@@ -130,6 +131,7 @@ struct d40_desc {
*/
struct d40_lcla_pool {
void *base;
+ dma_addr_t dma_addr;
void *base_unaligned;
int pages;
spinlock_t lock;
@@ -303,9 +305,37 @@ struct d40_reg_val {
unsigned int val;
};
-static int d40_pool_lli_alloc(struct d40_desc *d40d,
- int lli_len, bool is_log)
+static struct device *chan2dev(struct d40_chan *d40c)
{
+ return &d40c->chan.dev->device;
+}
+
+static bool chan_is_physical(struct d40_chan *chan)
+{
+ return chan->log_num == D40_PHY_CHAN;
+}
+
+static bool chan_is_logical(struct d40_chan *chan)
+{
+ return !chan_is_physical(chan);
+}
+
+static void __iomem *chan_base(struct d40_chan *chan)
+{
+ return chan->base->virtbase + D40_DREG_PCBASE +
+ chan->phy_chan->num * D40_DREG_PCDELTA;
+}
+
+#define d40_err(dev, format, arg...) \
+ dev_err(dev, "[%s] " format, __func__, ## arg)
+
+#define chan_err(d40c, format, arg...) \
+ d40_err(chan2dev(d40c), format, ## arg)
+
+static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
+ int lli_len)
+{
+ bool is_log = chan_is_logical(d40c);
u32 align;
void *base;
@@ -319,7 +349,7 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
d40d->lli_pool.base = NULL;
} else {
- d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
+ d40d->lli_pool.size = lli_len * 2 * align;
base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
d40d->lli_pool.base = base;
@@ -329,22 +359,37 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
}
if (is_log) {
- d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
- align);
- d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
- align);
+ d40d->lli_log.src = PTR_ALIGN(base, align);
+ d40d->lli_log.dst = d40d->lli_log.src + lli_len;
+
+ d40d->lli_pool.dma_addr = 0;
} else {
- d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
- align);
- d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
- align);
+ d40d->lli_phy.src = PTR_ALIGN(base, align);
+ d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
+
+ d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
+ d40d->lli_phy.src,
+ d40d->lli_pool.size,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(d40c->base->dev,
+ d40d->lli_pool.dma_addr)) {
+ kfree(d40d->lli_pool.base);
+ d40d->lli_pool.base = NULL;
+ d40d->lli_pool.dma_addr = 0;
+ return -ENOMEM;
+ }
}
return 0;
}
-static void d40_pool_lli_free(struct d40_desc *d40d)
+static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
+ if (d40d->lli_pool.dma_addr)
+ dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+
kfree(d40d->lli_pool.base);
d40d->lli_pool.base = NULL;
d40d->lli_pool.size = 0;
@@ -391,7 +436,7 @@ static int d40_lcla_free_all(struct d40_chan *d40c,
int i;
int ret = -EINVAL;
- if (d40c->log_num == D40_PHY_CHAN)
+ if (chan_is_physical(d40c))
return 0;
spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
@@ -430,7 +475,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
list_for_each_entry_safe(d, _d, &d40c->client, node)
if (async_tx_test_ack(&d->txd)) {
- d40_pool_lli_free(d);
+ d40_pool_lli_free(d40c, d);
d40_desc_remove(d);
desc = d;
memset(desc, 0, sizeof(*desc));
@@ -450,6 +495,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
+ d40_pool_lli_free(d40c, d40d);
d40_lcla_free_all(d40c, d40d);
kmem_cache_free(d40c->base->desc_slab, d40d);
}
@@ -459,57 +505,128 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
list_add_tail(&desc->node, &d40c->active);
}
-static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
- int curr_lcla = -EINVAL, next_lcla;
+ struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
+ struct d40_phy_lli *lli_src = desc->lli_phy.src;
+ void __iomem *base = chan_base(chan);
+
+ writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
+ writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
+ writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
+ writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
+
+ writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
+ writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
+ writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
+ writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
+}
- if (d40c->log_num == D40_PHY_CHAN) {
- d40_phy_lli_write(d40c->base->virtbase,
- d40c->phy_chan->num,
- d40d->lli_phy.dst,
- d40d->lli_phy.src);
- d40d->lli_current = d40d->lli_len;
- } else {
+static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
+{
+ struct d40_lcla_pool *pool = &chan->base->lcla_pool;
+ struct d40_log_lli_bidir *lli = &desc->lli_log;
+ int lli_current = desc->lli_current;
+ int lli_len = desc->lli_len;
+ bool cyclic = desc->cyclic;
+ int curr_lcla = -EINVAL;
+ int first_lcla = 0;
+ bool linkback;
- if ((d40d->lli_len - d40d->lli_current) > 1)
- curr_lcla = d40_lcla_alloc_one(d40c, d40d);
+ /*
+ * We may have partially running cyclic transfers, in case we didn't get
+ * enough LCLA entries.
+ */
+ linkback = cyclic && lli_current == 0;
- d40_log_lli_lcpa_write(d40c->lcpa,
- &d40d->lli_log.dst[d40d->lli_current],
- &d40d->lli_log.src[d40d->lli_current],
- curr_lcla);
+ /*
+ * For linkback, we need one LCLA even with only one link, because we
+ * can't link back to the one in LCPA space
+ */
+ if (linkback || (lli_len - lli_current > 1)) {
+ curr_lcla = d40_lcla_alloc_one(chan, desc);
+ first_lcla = curr_lcla;
+ }
- d40d->lli_current++;
- for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
- struct d40_log_lli *lcla;
+ /*
+ * For linkback, we normally load the LCPA in the loop since we need to
+ * link it to the second LCLA and not the first. However, if we
+ * couldn't even get a first LCLA, then we have to run in LCPA and
+ * reload manually.
+ */
+ if (!linkback || curr_lcla == -EINVAL) {
+ unsigned int flags = 0;
- if (d40d->lli_current + 1 < d40d->lli_len)
- next_lcla = d40_lcla_alloc_one(d40c, d40d);
- else
- next_lcla = -EINVAL;
+ if (curr_lcla == -EINVAL)
+ flags |= LLI_TERM_INT;
- lcla = d40c->base->lcla_pool.base +
- d40c->phy_chan->num * 1024 +
- 8 * curr_lcla * 2;
+ d40_log_lli_lcpa_write(chan->lcpa,
+ &lli->dst[lli_current],
+ &lli->src[lli_current],
+ curr_lcla,
+ flags);
+ lli_current++;
+ }
- d40_log_lli_lcla_write(lcla,
- &d40d->lli_log.dst[d40d->lli_current],
- &d40d->lli_log.src[d40d->lli_current],
- next_lcla);
+ if (curr_lcla < 0)
+ goto out;
- (void) dma_map_single(d40c->base->dev, lcla,
- 2 * sizeof(struct d40_log_lli),
- DMA_TO_DEVICE);
+ for (; lli_current < lli_len; lli_current++) {
+ unsigned int lcla_offset = chan->phy_chan->num * 1024 +
+ 8 * curr_lcla * 2;
+ struct d40_log_lli *lcla = pool->base + lcla_offset;
+ unsigned int flags = 0;
+ int next_lcla;
- curr_lcla = next_lcla;
+ if (lli_current + 1 < lli_len)
+ next_lcla = d40_lcla_alloc_one(chan, desc);
+ else
+ next_lcla = linkback ? first_lcla : -EINVAL;
- if (curr_lcla == -EINVAL) {
- d40d->lli_current++;
- break;
- }
+ if (cyclic || next_lcla == -EINVAL)
+ flags |= LLI_TERM_INT;
+
+ if (linkback && curr_lcla == first_lcla) {
+ /* First link goes in both LCPA and LCLA */
+ d40_log_lli_lcpa_write(chan->lcpa,
+ &lli->dst[lli_current],
+ &lli->src[lli_current],
+ next_lcla, flags);
+ }
+
+ /*
+ * One unused LCLA in the cyclic case if the very first
+ * next_lcla fails...
+ */
+ d40_log_lli_lcla_write(lcla,
+ &lli->dst[lli_current],
+ &lli->src[lli_current],
+ next_lcla, flags);
+
+ dma_sync_single_range_for_device(chan->base->dev,
+ pool->dma_addr, lcla_offset,
+ 2 * sizeof(struct d40_log_lli),
+ DMA_TO_DEVICE);
+ curr_lcla = next_lcla;
+
+ if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
+ lli_current++;
+ break;
}
}
+
+out:
+ desc->lli_current = lli_current;
+}
+
+static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+ if (chan_is_physical(d40c)) {
+ d40_phy_lli_load(d40c, d40d);
+ d40d->lli_current = d40d->lli_len;
+ } else
+ d40_log_lli_to_lcxa(d40c, d40d);
}
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
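For orientation, the LCLA addressing in d40_log_lli_to_lcxa() above: each allocated slot holds a src/dst pair of logical LLIs inside a per-physical-channel area. The sketch below is not part of the patch; the 1024-byte area and the 8-byte logical LLI size are read off the constants in the loop and assumed to hold.

        /* Illustrative only: mirrors the lcla_offset computation in the loop above. */
        static unsigned int example_lcla_offset(unsigned int phy_chan_num,
                                                unsigned int lcla_slot)
        {
                /* 1024 bytes of LCLA per physical channel (assumed from the code) */
                unsigned int chan_area = phy_chan_num * 1024;

                /* each slot is a src/dst pair of 8-byte logical LLIs */
                return chan_area + lcla_slot * 2 * 8;
        }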
@@ -543,18 +660,6 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
return d;
}
-static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
-{
- struct d40_desc *d;
-
- if (list_empty(&d40c->queue))
- return NULL;
- list_for_each_entry(d, &d40c->queue, node)
- if (list_is_last(&d->node, &d40c->queue))
- break;
- return d;
-}
-
static int d40_psize_2_burst_size(bool is_log, int psize)
{
if (is_log) {
@@ -666,9 +771,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
}
if (i == D40_SUSPEND_MAX_IT) {
- dev_err(&d40c->chan.dev->device,
- "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
- __func__, d40c->phy_chan->num, d40c->log_num,
+ chan_err(d40c,
+ "unable to suspend the chl %d (log: %d) status %x\n",
+ d40c->phy_chan->num, d40c->log_num,
status);
dump_stack();
ret = -EBUSY;
@@ -701,17 +806,45 @@ static void d40_term_all(struct d40_chan *d40c)
d40c->busy = false;
}
+static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
+ u32 event, int reg)
+{
+ void __iomem *addr = chan_base(d40c) + reg;
+ int tries;
+
+ if (!enable) {
+ writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
+ | ~D40_EVENTLINE_MASK(event), addr);
+ return;
+ }
+
+ /*
+ * The hardware sometimes doesn't register the enable when src and dst
+ * event lines are active on the same logical channel. Retry to ensure
+ * it does. Usually only one retry is sufficient.
+ */
+ tries = 100;
+ while (--tries) {
+ writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
+ | ~D40_EVENTLINE_MASK(event), addr);
+
+ if (readl(addr) & D40_EVENTLINE_MASK(event))
+ break;
+ }
+
+ if (tries != 99)
+ dev_dbg(chan2dev(d40c),
+ "[%s] workaround enable S%cLNK (%d tries)\n",
+ __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
+ 100 - tries);
+
+ WARN_ON(!tries);
+}
+
static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
- u32 val;
unsigned long flags;
- /* Notice, that disable requires the physical channel to be stopped */
- if (do_enable)
- val = D40_ACTIVATE_EVENTLINE;
- else
- val = D40_DEACTIVATE_EVENTLINE;
-
spin_lock_irqsave(&d40c->phy_chan->lock, flags);
/* Enable event line connected to device (or memcpy) */
@@ -719,20 +852,15 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
(d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
- writel((val << D40_EVENTLINE_POS(event)) |
- ~D40_EVENTLINE_MASK(event),
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
+ __d40_config_set_event(d40c, do_enable, event,
+ D40_CHAN_REG_SSLNK);
}
+
if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
- writel((val << D40_EVENTLINE_POS(event)) |
- ~D40_EVENTLINE_MASK(event),
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK);
+ __d40_config_set_event(d40c, do_enable, event,
+ D40_CHAN_REG_SDLNK);
}
spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
@@ -740,15 +868,12 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
+ void __iomem *chanbase = chan_base(d40c);
u32 val;
- val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
+ val = readl(chanbase + D40_CHAN_REG_SSLNK);
+ val |= readl(chanbase + D40_CHAN_REG_SDLNK);
- val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK);
return val;
}
@@ -771,7 +896,7 @@ static u32 d40_get_prmo(struct d40_chan *d40c)
= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
};
- if (d40c->log_num == D40_PHY_CHAN)
+ if (chan_is_physical(d40c))
return phy_map[d40c->dma_cfg.mode_opt];
else
return log_map[d40c->dma_cfg.mode_opt];
@@ -785,7 +910,7 @@ static void d40_config_write(struct d40_chan *d40c)
/* Odd addresses are even addresses + 4 */
addr_base = (d40c->phy_chan->num % 2) * 4;
/* Setup channel mode to logical or physical */
- var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
+ var = ((u32)(chan_is_logical(d40c)) + 1) <<
D40_CHAN_POS(d40c->phy_chan->num);
writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
@@ -794,30 +919,18 @@ static void d40_config_write(struct d40_chan *d40c)
writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
+ int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
+ & D40_SREG_ELEM_LOG_LIDX_MASK;
+ void __iomem *chanbase = chan_base(d40c);
+
/* Set default config for CFG reg */
- writel(d40c->src_def_cfg,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSCFG);
- writel(d40c->dst_def_cfg,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDCFG);
+ writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
+ writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
/* Set LIDX for lcla */
- writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
- D40_SREG_ELEM_LOG_LIDX_MASK,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDELT);
-
- writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
- D40_SREG_ELEM_LOG_LIDX_MASK,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSELT);
-
+ writel(lidx, chanbase + D40_CHAN_REG_SSELT);
+ writel(lidx, chanbase + D40_CHAN_REG_SDELT);
}
}
@@ -825,15 +938,15 @@ static u32 d40_residue(struct d40_chan *d40c)
{
u32 num_elt;
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
>> D40_MEM_LCSP2_ECNT_POS;
- else
- num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDELT) &
- D40_SREG_ELEM_PHY_ECNT_MASK) >>
- D40_SREG_ELEM_PHY_ECNT_POS;
+ else {
+ u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
+ num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
+ >> D40_SREG_ELEM_PHY_ECNT_POS;
+ }
+
return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
@@ -841,20 +954,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c)
{
bool is_link;
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
else
- is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK) &
- D40_SREG_LNK_PHYS_LNK_MASK;
+ is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
+ & D40_SREG_LNK_PHYS_LNK_MASK;
+
return is_link;
}
-static int d40_pause(struct dma_chan *chan)
+static int d40_pause(struct d40_chan *d40c)
{
- struct d40_chan *d40c =
- container_of(chan, struct d40_chan, chan);
int res = 0;
unsigned long flags;
@@ -865,7 +975,7 @@ static int d40_pause(struct dma_chan *chan)
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
if (res == 0) {
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
d40_config_set_event(d40c, false);
/* Resume the other logical channels if any */
if (d40_chan_has_events(d40c))
@@ -878,10 +988,8 @@ static int d40_pause(struct dma_chan *chan)
return res;
}
-static int d40_resume(struct dma_chan *chan)
+static int d40_resume(struct d40_chan *d40c)
{
- struct d40_chan *d40c =
- container_of(chan, struct d40_chan, chan);
int res = 0;
unsigned long flags;
@@ -891,7 +999,7 @@ static int d40_resume(struct dma_chan *chan)
spin_lock_irqsave(&d40c->lock, flags);
if (d40c->base->rev == 0)
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
res = d40_channel_execute_command(d40c,
D40_DMA_SUSPEND_REQ);
goto no_suspend;
@@ -900,7 +1008,7 @@ static int d40_resume(struct dma_chan *chan)
/* If bytes left to transfer or linked tx resume job */
if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
d40_config_set_event(d40c, true);
res = d40_channel_execute_command(d40c, D40_DMA_RUN);
@@ -911,75 +1019,20 @@ no_suspend:
return res;
}
-static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
+static int d40_terminate_all(struct d40_chan *chan)
{
- /* TODO: Write */
-}
-
-static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
-{
- struct d40_desc *d40d_prev = NULL;
- int i;
- u32 val;
-
- if (!list_empty(&d40c->queue))
- d40d_prev = d40_last_queued(d40c);
- else if (!list_empty(&d40c->active))
- d40d_prev = d40_first_active_get(d40c);
-
- if (!d40d_prev)
- return;
-
- /* Here we try to join this job with previous jobs */
- val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
-
- /* Figure out which link we're currently transmitting */
- for (i = 0; i < d40d_prev->lli_len; i++)
- if (val == d40d_prev->lli_phy.src[i].reg_lnk)
- break;
-
- val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;
-
- if (i == (d40d_prev->lli_len - 1) && val > 0) {
- /* Change the current one */
- writel(virt_to_phys(d40d->lli_phy.src),
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
- writel(virt_to_phys(d40d->lli_phy.dst),
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK);
-
- d40d->is_hw_linked = true;
-
- } else if (i < d40d_prev->lli_len) {
- (void) dma_unmap_single(d40c->base->dev,
- virt_to_phys(d40d_prev->lli_phy.src),
- d40d_prev->lli_pool.size,
- DMA_TO_DEVICE);
+ unsigned long flags;
+ int ret = 0;
- /* Keep the settings */
- val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
- ~D40_SREG_LNK_PHYS_LNK_MASK;
- d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
- val | virt_to_phys(d40d->lli_phy.src);
+ ret = d40_pause(chan);
+ if (!ret && chan_is_physical(chan))
+ ret = d40_channel_execute_command(chan, D40_DMA_STOP);
- val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
- ~D40_SREG_LNK_PHYS_LNK_MASK;
- d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
- val | virt_to_phys(d40d->lli_phy.dst);
+ spin_lock_irqsave(&chan->lock, flags);
+ d40_term_all(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
- (void) dma_map_single(d40c->base->dev,
- d40d_prev->lli_phy.src,
- d40d_prev->lli_pool.size,
- DMA_TO_DEVICE);
- d40d->is_hw_linked = true;
- }
+ return ret;
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
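For reference, a hypothetical client tear-down exercising the new d40_terminate_all() path through the standard dmaengine control hook; only device_control and DMA_TERMINATE_ALL are assumed, both wired up later in this patch.

        #include <linux/dmaengine.h>

        /* Hypothetical client: stop all pending work on a dma40 channel. */
        static void example_stop_channel(struct dma_chan *chan)
        {
                /* DMA_TERMINATE_ALL now pauses, stops a physical channel and
                 * then releases every queued descriptor via d40_term_all(). */
                chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
        }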
@@ -990,8 +1043,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
unsigned long flags;
- (void) d40_pause(&d40c->chan);
-
spin_lock_irqsave(&d40c->lock, flags);
d40c->chan.cookie++;
@@ -1001,17 +1052,10 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
d40d->txd.cookie = d40c->chan.cookie;
- if (d40c->log_num == D40_PHY_CHAN)
- d40_tx_submit_phy(d40c, d40d);
- else
- d40_tx_submit_log(d40c, d40d);
-
d40_desc_queue(d40c, d40d);
spin_unlock_irqrestore(&d40c->lock, flags);
- (void) d40_resume(&d40c->chan);
-
return tx->cookie;
}
@@ -1020,7 +1064,7 @@ static int d40_start(struct d40_chan *d40c)
if (d40c->base->rev == 0) {
int err;
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
err = d40_channel_execute_command(d40c,
D40_DMA_SUSPEND_REQ);
if (err)
@@ -1028,7 +1072,7 @@ static int d40_start(struct d40_chan *d40c)
}
}
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
d40_config_set_event(d40c, true);
return d40_channel_execute_command(d40c, D40_DMA_RUN);
@@ -1051,21 +1095,14 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
/* Add to active queue */
d40_desc_submit(d40c, d40d);
- /*
- * If this job is already linked in hw,
- * do not submit it.
- */
-
- if (!d40d->is_hw_linked) {
- /* Initiate DMA job */
- d40_desc_load(d40c, d40d);
+ /* Initiate DMA job */
+ d40_desc_load(d40c, d40d);
- /* Start dma job */
- err = d40_start(d40c);
+ /* Start dma job */
+ err = d40_start(d40c);
- if (err)
- return NULL;
- }
+ if (err)
+ return NULL;
}
return d40d;
@@ -1082,17 +1119,36 @@ static void dma_tc_handle(struct d40_chan *d40c)
if (d40d == NULL)
return;
- d40_lcla_free_all(d40c, d40d);
+ if (d40d->cyclic) {
+ /*
+ * If this was a partially loaded list, we need to reload it, but
+ * only once the whole list has completed. We need to check for
+ * done because the interrupt will hit for every link, and not
+ * just the last one.
+ */
+ if (d40d->lli_current < d40d->lli_len
+ && !d40_tx_is_linked(d40c)
+ && !d40_residue(d40c)) {
+ d40_lcla_free_all(d40c, d40d);
+ d40_desc_load(d40c, d40d);
+ (void) d40_start(d40c);
- if (d40d->lli_current < d40d->lli_len) {
- d40_desc_load(d40c, d40d);
- /* Start dma job */
- (void) d40_start(d40c);
- return;
- }
+ if (d40d->lli_current == d40d->lli_len)
+ d40d->lli_current = 0;
+ }
+ } else {
+ d40_lcla_free_all(d40c, d40d);
- if (d40_queue_start(d40c) == NULL)
- d40c->busy = false;
+ if (d40d->lli_current < d40d->lli_len) {
+ d40_desc_load(d40c, d40d);
+ /* Start dma job */
+ (void) d40_start(d40c);
+ return;
+ }
+
+ if (d40_queue_start(d40c) == NULL)
+ d40c->busy = false;
+ }
d40c->pending_tx++;
tasklet_schedule(&d40c->tasklet);
@@ -1111,11 +1167,11 @@ static void dma_tasklet(unsigned long data)
/* Get first active entry from list */
d40d = d40_first_active_get(d40c);
-
if (d40d == NULL)
goto err;
- d40c->completed = d40d->txd.cookie;
+ if (!d40d->cyclic)
+ d40c->completed = d40d->txd.cookie;
/*
* If terminating a channel pending_tx is set to zero.
@@ -1130,16 +1186,18 @@ static void dma_tasklet(unsigned long data)
callback = d40d->txd.callback;
callback_param = d40d->txd.callback_param;
- if (async_tx_test_ack(&d40d->txd)) {
- d40_pool_lli_free(d40d);
- d40_desc_remove(d40d);
- d40_desc_free(d40c, d40d);
- } else {
- if (!d40d->is_in_client_list) {
+ if (!d40d->cyclic) {
+ if (async_tx_test_ack(&d40d->txd)) {
+ d40_pool_lli_free(d40c, d40d);
d40_desc_remove(d40d);
- d40_lcla_free_all(d40c, d40d);
- list_add_tail(&d40d->node, &d40c->client);
- d40d->is_in_client_list = true;
+ d40_desc_free(d40c, d40d);
+ } else {
+ if (!d40d->is_in_client_list) {
+ d40_desc_remove(d40d);
+ d40_lcla_free_all(d40c, d40d);
+ list_add_tail(&d40d->node, &d40c->client);
+ d40d->is_in_client_list = true;
+ }
}
}
@@ -1216,9 +1274,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
if (!il[row].is_error)
dma_tc_handle(d40c);
else
- dev_err(base->dev,
- "[%s] IRQ chan: %ld offset %d idx %d\n",
- __func__, chan, il[row].offset, idx);
+ d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
+ chan, il[row].offset, idx);
spin_unlock(&d40c->lock);
}
@@ -1237,8 +1294,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
if (!conf->dir) {
- dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
- __func__);
+ chan_err(d40c, "Invalid direction.\n");
res = -EINVAL;
}
@@ -1246,46 +1302,40 @@ static int d40_validate_conf(struct d40_chan *d40c,
d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
d40c->runtime_addr == 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Invalid TX channel address (%d)\n",
- __func__, conf->dst_dev_type);
+ chan_err(d40c, "Invalid TX channel address (%d)\n",
+ conf->dst_dev_type);
res = -EINVAL;
}
if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
d40c->runtime_addr == 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Invalid RX channel address (%d)\n",
- __func__, conf->src_dev_type);
+ chan_err(d40c, "Invalid RX channel address (%d)\n",
+ conf->src_dev_type);
res = -EINVAL;
}
if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
dst_event_group == STEDMA40_DEV_DST_MEMORY) {
- dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
- __func__);
+ chan_err(d40c, "Invalid dst\n");
res = -EINVAL;
}
if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
src_event_group == STEDMA40_DEV_SRC_MEMORY) {
- dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
- __func__);
+ chan_err(d40c, "Invalid src\n");
res = -EINVAL;
}
if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
- dev_err(&d40c->chan.dev->device,
- "[%s] No event line\n", __func__);
+ chan_err(d40c, "No event line\n");
res = -EINVAL;
}
if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
(src_event_group != dst_event_group)) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Invalid event group\n", __func__);
+ chan_err(d40c, "Invalid event group\n");
res = -EINVAL;
}
@@ -1294,9 +1344,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
* DMAC HW supports it. Will be added to this driver,
* in case any dma client requires it.
*/
- dev_err(&d40c->chan.dev->device,
- "[%s] periph to periph not supported\n",
- __func__);
+ chan_err(d40c, "periph to periph not supported\n");
res = -EINVAL;
}
@@ -1309,9 +1357,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
* src (burst x width) == dst (burst x width)
*/
- dev_err(&d40c->chan.dev->device,
- "[%s] src (burst x width) != dst (burst x width)\n",
- __func__);
+ chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
res = -EINVAL;
}
@@ -1514,8 +1560,7 @@ static int d40_config_memcpy(struct d40_chan *d40c)
dma_has_cap(DMA_SLAVE, cap)) {
d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
} else {
- dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
- __func__);
+ chan_err(d40c, "No memcpy\n");
return -EINVAL;
}
@@ -1540,21 +1585,19 @@ static int d40_free_dma(struct d40_chan *d40c)
/* Release client owned descriptors */
if (!list_empty(&d40c->client))
list_for_each_entry_safe(d, _d, &d40c->client, node) {
- d40_pool_lli_free(d);
+ d40_pool_lli_free(d40c, d);
d40_desc_remove(d);
d40_desc_free(d40c, d);
}
if (phy == NULL) {
- dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
- __func__);
+ chan_err(d40c, "phy == null\n");
return -EINVAL;
}
if (phy->allocated_src == D40_ALLOC_FREE &&
phy->allocated_dst == D40_ALLOC_FREE) {
- dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
- __func__);
+ chan_err(d40c, "channel already free\n");
return -EINVAL;
}
@@ -1566,19 +1609,17 @@ static int d40_free_dma(struct d40_chan *d40c)
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
is_src = true;
} else {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unknown direction\n", __func__);
+ chan_err(d40c, "Unknown direction\n");
return -EINVAL;
}
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
if (res) {
- dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
- __func__);
+ chan_err(d40c, "suspend failed\n");
return res;
}
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
/* Release logical channel, deactivate the event line */
d40_config_set_event(d40c, false);
@@ -1594,9 +1635,8 @@ static int d40_free_dma(struct d40_chan *d40c)
res = d40_channel_execute_command(d40c,
D40_DMA_RUN);
if (res) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Executing RUN command\n",
- __func__);
+ chan_err(d40c,
+ "Executing RUN command\n");
return res;
}
}
@@ -1609,8 +1649,7 @@ static int d40_free_dma(struct d40_chan *d40c)
/* Release physical channel */
res = d40_channel_execute_command(d40c, D40_DMA_STOP);
if (res) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to stop channel\n", __func__);
+ chan_err(d40c, "Failed to stop channel\n");
return res;
}
d40c->phy_chan = NULL;
@@ -1622,6 +1661,7 @@ static int d40_free_dma(struct d40_chan *d40c)
static bool d40_is_paused(struct d40_chan *d40c)
{
+ void __iomem *chanbase = chan_base(d40c);
bool is_paused = false;
unsigned long flags;
void __iomem *active_reg;
@@ -1630,7 +1670,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
spin_lock_irqsave(&d40c->lock, flags);
- if (d40c->log_num == D40_PHY_CHAN) {
+ if (chan_is_physical(d40c)) {
if (d40c->phy_chan->num % 2 == 0)
active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
else
@@ -1648,17 +1688,12 @@ static bool d40_is_paused(struct d40_chan *d40c)
if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
- status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK);
+ status = readl(chanbase + D40_CHAN_REG_SDLNK);
} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
- status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
+ status = readl(chanbase + D40_CHAN_REG_SSLNK);
} else {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unknown direction\n", __func__);
+ chan_err(d40c, "Unknown direction\n");
goto _exit;
}
@@ -1688,114 +1723,184 @@ static u32 stedma40_residue(struct dma_chan *chan)
return bytes_left;
}
-struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
- struct scatterlist *sgl_dst,
- struct scatterlist *sgl_src,
- unsigned int sgl_len,
- unsigned long dma_flags)
+static int
+d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
+ struct scatterlist *sg_src, struct scatterlist *sg_dst,
+ unsigned int sg_len, dma_addr_t src_dev_addr,
+ dma_addr_t dst_dev_addr)
{
- int res;
- struct d40_desc *d40d;
- struct d40_chan *d40c = container_of(chan, struct d40_chan,
- chan);
- unsigned long flags;
+ struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+ struct stedma40_half_channel_info *src_info = &cfg->src_info;
+ struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
+ int ret;
- if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unallocated channel.\n", __func__);
- return ERR_PTR(-EINVAL);
- }
+ ret = d40_log_sg_to_lli(sg_src, sg_len,
+ src_dev_addr,
+ desc->lli_log.src,
+ chan->log_def.lcsp1,
+ src_info->data_width,
+ dst_info->data_width);
- spin_lock_irqsave(&d40c->lock, flags);
- d40d = d40_desc_get(d40c);
+ ret = d40_log_sg_to_lli(sg_dst, sg_len,
+ dst_dev_addr,
+ desc->lli_log.dst,
+ chan->log_def.lcsp3,
+ dst_info->data_width,
+ src_info->data_width);
- if (d40d == NULL)
+ return ret < 0 ? ret : 0;
+}
+
+static int
+d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
+ struct scatterlist *sg_src, struct scatterlist *sg_dst,
+ unsigned int sg_len, dma_addr_t src_dev_addr,
+ dma_addr_t dst_dev_addr)
+{
+ struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+ struct stedma40_half_channel_info *src_info = &cfg->src_info;
+ struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
+ unsigned long flags = 0;
+ int ret;
+
+ if (desc->cyclic)
+ flags |= LLI_CYCLIC | LLI_TERM_INT;
+
+ ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
+ desc->lli_phy.src,
+ virt_to_phys(desc->lli_phy.src),
+ chan->src_def_cfg,
+ src_info, dst_info, flags);
+
+ ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
+ desc->lli_phy.dst,
+ virt_to_phys(desc->lli_phy.dst),
+ chan->dst_def_cfg,
+ dst_info, src_info, flags);
+
+ dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
+ desc->lli_pool.size, DMA_TO_DEVICE);
+
+ return ret < 0 ? ret : 0;
+}
+
+
+static struct d40_desc *
+d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
+ unsigned int sg_len, unsigned long dma_flags)
+{
+ struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+ struct d40_desc *desc;
+ int ret;
+
+ desc = d40_desc_get(chan);
+ if (!desc)
+ return NULL;
+
+ desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
+ cfg->dst_info.data_width);
+ if (desc->lli_len < 0) {
+ chan_err(chan, "Unaligned size\n");
goto err;
+ }
- d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
- if (d40d->lli_len < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unaligned size\n", __func__);
+ ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
+ if (ret < 0) {
+ chan_err(chan, "Could not allocate lli\n");
goto err;
}
- d40d->lli_current = 0;
- d40d->txd.flags = dma_flags;
- if (d40c->log_num != D40_PHY_CHAN) {
+ desc->lli_current = 0;
+ desc->txd.flags = dma_flags;
+ desc->txd.tx_submit = d40_tx_submit;
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- goto err;
- }
+ dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
- (void) d40_log_sg_to_lli(sgl_src,
- sgl_len,
- d40d->lli_log.src,
- d40c->log_def.lcsp1,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
-
- (void) d40_log_sg_to_lli(sgl_dst,
- sgl_len,
- d40d->lli_log.dst,
- d40c->log_def.lcsp3,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width);
- } else {
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- goto err;
- }
+ return desc;
+
+err:
+ d40_desc_free(chan, desc);
+ return NULL;
+}
+
+static dma_addr_t
+d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
+{
+ struct stedma40_platform_data *plat = chan->base->plat_data;
+ struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+ dma_addr_t addr;
- res = d40_phy_sg_to_lli(sgl_src,
- sgl_len,
- 0,
- d40d->lli_phy.src,
- virt_to_phys(d40d->lli_phy.src),
- d40c->src_def_cfg,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.psize);
+ if (chan->runtime_addr)
+ return chan->runtime_addr;
- if (res < 0)
- goto err;
+ if (direction == DMA_FROM_DEVICE)
+ addr = plat->dev_rx[cfg->src_dev_type];
+ else if (direction == DMA_TO_DEVICE)
+ addr = plat->dev_tx[cfg->dst_dev_type];
- res = d40_phy_sg_to_lli(sgl_dst,
- sgl_len,
- 0,
- d40d->lli_phy.dst,
- virt_to_phys(d40d->lli_phy.dst),
- d40c->dst_def_cfg,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.psize);
+ return addr;
+}
- if (res < 0)
- goto err;
+static struct dma_async_tx_descriptor *
+d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
+ struct scatterlist *sg_dst, unsigned int sg_len,
+ enum dma_data_direction direction, unsigned long dma_flags)
+{
+ struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
+ dma_addr_t src_dev_addr = 0;
+ dma_addr_t dst_dev_addr = 0;
+ struct d40_desc *desc;
+ unsigned long flags;
+ int ret;
- (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
- d40d->lli_pool.size, DMA_TO_DEVICE);
+ if (!chan->phy_chan) {
+ chan_err(chan, "Cannot prepare unallocated channel\n");
+ return NULL;
}
- dma_async_tx_descriptor_init(&d40d->txd, chan);
- d40d->txd.tx_submit = d40_tx_submit;
+ spin_lock_irqsave(&chan->lock, flags);
- spin_unlock_irqrestore(&d40c->lock, flags);
+ desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
+ if (desc == NULL)
+ goto err;
+
+ if (sg_next(&sg_src[sg_len - 1]) == sg_src)
+ desc->cyclic = true;
+
+ if (direction != DMA_NONE) {
+ dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
+
+ if (direction == DMA_FROM_DEVICE)
+ src_dev_addr = dev_addr;
+ else if (direction == DMA_TO_DEVICE)
+ dst_dev_addr = dev_addr;
+ }
+
+ if (chan_is_logical(chan))
+ ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
+ sg_len, src_dev_addr, dst_dev_addr);
+ else
+ ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
+ sg_len, src_dev_addr, dst_dev_addr);
+
+ if (ret) {
+ chan_err(chan, "Failed to prepare %s sg job: %d\n",
+ chan_is_logical(chan) ? "log" : "phy", ret);
+ goto err;
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return &desc->txd;
- return &d40d->txd;
err:
- if (d40d)
- d40_desc_free(d40c, d40d);
- spin_unlock_irqrestore(&d40c->lock, flags);
+ if (desc)
+ d40_desc_free(chan, desc);
+ spin_unlock_irqrestore(&chan->lock, flags);
return NULL;
}
-EXPORT_SYMBOL(stedma40_memcpy_sg);
bool stedma40_filter(struct dma_chan *chan, void *data)
{
@@ -1818,6 +1923,38 @@ bool stedma40_filter(struct dma_chan *chan, void *data)
}
EXPORT_SYMBOL(stedma40_filter);
+static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
+{
+ bool realtime = d40c->dma_cfg.realtime;
+ bool highprio = d40c->dma_cfg.high_priority;
+ u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
+ u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
+ u32 event = D40_TYPE_TO_EVENT(dev_type);
+ u32 group = D40_TYPE_TO_GROUP(dev_type);
+ u32 bit = 1 << event;
+
+ /* Destination event lines are stored in the upper halfword */
+ if (!src)
+ bit <<= 16;
+
+ writel(bit, d40c->base->virtbase + prioreg + group * 4);
+ writel(bit, d40c->base->virtbase + rtreg + group * 4);
+}
+
+static void d40_set_prio_realtime(struct d40_chan *d40c)
+{
+ if (d40c->base->rev < 3)
+ return;
+
+ if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
+ (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+ __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
+
+ if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
+ (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+ __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
+}
+
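The halfword split in __d40_set_prio_rt() above is easy to miss: one bit per event line, with destination events shifted into the upper 16 bits of the per-group priority/realtime register. A minimal sketch of that bit selection (only the split visible in the code above is assumed):

        /* Illustrative only: the register bit __d40_set_prio_rt() writes. */
        static u32 example_prio_rt_bit(u32 event, bool is_src)
        {
                u32 bit = 1 << event;           /* source event lines: bits 0..15 */

                if (!is_src)
                        bit <<= 16;             /* destination event lines: bits 16..31 */

                return bit;
        }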
/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
@@ -1834,9 +1971,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
if (!d40c->configured) {
err = d40_config_memcpy(d40c);
if (err) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to configure memcpy channel\n",
- __func__);
+ chan_err(d40c, "Failed to configure memcpy channel\n");
goto fail;
}
}
@@ -1844,16 +1979,17 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
err = d40_allocate_channel(d40c);
if (err) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to allocate channel\n", __func__);
+ chan_err(d40c, "Failed to allocate channel\n");
goto fail;
}
/* Fill in basic CFG register values */
d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
- &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
+ &d40c->dst_def_cfg, chan_is_logical(d40c));
- if (d40c->log_num != D40_PHY_CHAN) {
+ d40_set_prio_realtime(d40c);
+
+ if (chan_is_logical(d40c)) {
d40_log_cfg(&d40c->dma_cfg,
&d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
@@ -1886,8 +2022,7 @@ static void d40_free_chan_resources(struct dma_chan *chan)
unsigned long flags;
if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Cannot free unallocated channel\n", __func__);
+ chan_err(d40c, "Cannot free unallocated channel\n");
return;
}
@@ -1897,8 +2032,7 @@ static void d40_free_chan_resources(struct dma_chan *chan)
err = d40_free_dma(d40c);
if (err)
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to free channel\n", __func__);
+ chan_err(d40c, "Failed to free channel\n");
spin_unlock_irqrestore(&d40c->lock, flags);
}
@@ -1908,251 +2042,31 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
size_t size,
unsigned long dma_flags)
{
- struct d40_desc *d40d;
- struct d40_chan *d40c = container_of(chan, struct d40_chan,
- chan);
- unsigned long flags;
-
- if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Channel is not allocated.\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- spin_lock_irqsave(&d40c->lock, flags);
- d40d = d40_desc_get(d40c);
-
- if (d40d == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Descriptor is NULL\n", __func__);
- goto err;
- }
+ struct scatterlist dst_sg;
+ struct scatterlist src_sg;
- d40d->txd.flags = dma_flags;
- d40d->lli_len = d40_size_2_dmalen(size,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
- if (d40d->lli_len < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unaligned size\n", __func__);
- goto err;
- }
+ sg_init_table(&dst_sg, 1);
+ sg_init_table(&src_sg, 1);
+ sg_dma_address(&dst_sg) = dst;
+ sg_dma_address(&src_sg) = src;
- dma_async_tx_descriptor_init(&d40d->txd, chan);
+ sg_dma_len(&dst_sg) = size;
+ sg_dma_len(&src_sg) = size;
- d40d->txd.tx_submit = d40_tx_submit;
-
- if (d40c->log_num != D40_PHY_CHAN) {
-
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- goto err;
- }
- d40d->lli_current = 0;
-
- if (d40_log_buf_to_lli(d40d->lli_log.src,
- src,
- size,
- d40c->log_def.lcsp1,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- true) == NULL)
- goto err;
-
- if (d40_log_buf_to_lli(d40d->lli_log.dst,
- dst,
- size,
- d40c->log_def.lcsp3,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width,
- true) == NULL)
- goto err;
-
- } else {
-
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- goto err;
- }
-
- if (d40_phy_buf_to_lli(d40d->lli_phy.src,
- src,
- size,
- d40c->dma_cfg.src_info.psize,
- 0,
- d40c->src_def_cfg,
- true,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- false) == NULL)
- goto err;
-
- if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
- dst,
- size,
- d40c->dma_cfg.dst_info.psize,
- 0,
- d40c->dst_def_cfg,
- true,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width,
- false) == NULL)
- goto err;
-
- (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
- d40d->lli_pool.size, DMA_TO_DEVICE);
- }
-
- spin_unlock_irqrestore(&d40c->lock, flags);
- return &d40d->txd;
-
-err:
- if (d40d)
- d40_desc_free(d40c, d40d);
- spin_unlock_irqrestore(&d40c->lock, flags);
- return NULL;
+ return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
}
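With d40_prep_memcpy() reduced to two single-entry scatterlists fed into d40_prep_sg(), a client still goes through the usual dmaengine calls. A hedged usage sketch, with a hypothetical channel, addresses and length:

        #include <linux/dmaengine.h>

        /* Hypothetical client: copy @len bytes through a dma40 memcpy channel. */
        static dma_cookie_t example_memcpy(struct dma_chan *chan, dma_addr_t dst,
                                           dma_addr_t src, size_t len)
        {
                struct dma_async_tx_descriptor *tx;
                dma_cookie_t cookie;

                tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                           DMA_PREP_INTERRUPT);
                if (!tx)
                        return -EBUSY;

                cookie = tx->tx_submit(tx);     /* queues the descriptor */
                dma_async_issue_pending(chan);  /* kicks d40_issue_pending() */
                return cookie;
        }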
static struct dma_async_tx_descriptor *
-d40_prep_sg(struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long dma_flags)
+d40_prep_memcpy_sg(struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long dma_flags)
{
if (dst_nents != src_nents)
return NULL;
- return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
-}
-
-static int d40_prep_slave_sg_log(struct d40_desc *d40d,
- struct d40_chan *d40c,
- struct scatterlist *sgl,
- unsigned int sg_len,
- enum dma_data_direction direction,
- unsigned long dma_flags)
-{
- dma_addr_t dev_addr = 0;
- int total_size;
-
- d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
- if (d40d->lli_len < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unaligned size\n", __func__);
- return -EINVAL;
- }
-
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- return -ENOMEM;
- }
-
- d40d->lli_current = 0;
-
- if (direction == DMA_FROM_DEVICE)
- if (d40c->runtime_addr)
- dev_addr = d40c->runtime_addr;
- else
- dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
- else if (direction == DMA_TO_DEVICE)
- if (d40c->runtime_addr)
- dev_addr = d40c->runtime_addr;
- else
- dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
-
- else
- return -EINVAL;
-
- total_size = d40_log_sg_to_dev(sgl, sg_len,
- &d40d->lli_log,
- &d40c->log_def,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- direction,
- dev_addr);
-
- if (total_size < 0)
- return -EINVAL;
-
- return 0;
-}
-
-static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
- struct d40_chan *d40c,
- struct scatterlist *sgl,
- unsigned int sgl_len,
- enum dma_data_direction direction,
- unsigned long dma_flags)
-{
- dma_addr_t src_dev_addr;
- dma_addr_t dst_dev_addr;
- int res;
-
- d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width);
- if (d40d->lli_len < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Unaligned size\n", __func__);
- return -EINVAL;
- }
-
- if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Out of memory\n", __func__);
- return -ENOMEM;
- }
-
- d40d->lli_current = 0;
-
- if (direction == DMA_FROM_DEVICE) {
- dst_dev_addr = 0;
- if (d40c->runtime_addr)
- src_dev_addr = d40c->runtime_addr;
- else
- src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
- } else if (direction == DMA_TO_DEVICE) {
- if (d40c->runtime_addr)
- dst_dev_addr = d40c->runtime_addr;
- else
- dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
- src_dev_addr = 0;
- } else
- return -EINVAL;
-
- res = d40_phy_sg_to_lli(sgl,
- sgl_len,
- src_dev_addr,
- d40d->lli_phy.src,
- virt_to_phys(d40d->lli_phy.src),
- d40c->src_def_cfg,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.psize);
- if (res < 0)
- return res;
-
- res = d40_phy_sg_to_lli(sgl,
- sgl_len,
- dst_dev_addr,
- d40d->lli_phy.dst,
- virt_to_phys(d40d->lli_phy.dst),
- d40c->dst_def_cfg,
- d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.dst_info.psize);
- if (res < 0)
- return res;
-
- (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
- d40d->lli_pool.size, DMA_TO_DEVICE);
- return 0;
+ return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
}
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
@@ -2161,52 +2075,40 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
enum dma_data_direction direction,
unsigned long dma_flags)
{
- struct d40_desc *d40d;
- struct d40_chan *d40c = container_of(chan, struct d40_chan,
- chan);
- unsigned long flags;
- int err;
-
- if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Cannot prepare unallocated channel\n", __func__);
- return ERR_PTR(-EINVAL);
- }
+ if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
+ return NULL;
- spin_lock_irqsave(&d40c->lock, flags);
- d40d = d40_desc_get(d40c);
+ return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
+}
- if (d40d == NULL)
- goto err;
+static struct dma_async_tx_descriptor *
+dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_data_direction direction)
+{
+ unsigned int periods = buf_len / period_len;
+ struct dma_async_tx_descriptor *txd;
+ struct scatterlist *sg;
+ int i;
- if (d40c->log_num != D40_PHY_CHAN)
- err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
- direction, dma_flags);
- else
- err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
- direction, dma_flags);
- if (err) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to prepare %s slave sg job: %d\n",
- __func__,
- d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
- goto err;
+ sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
+ for (i = 0; i < periods; i++) {
+ sg_dma_address(&sg[i]) = dma_addr;
+ sg_dma_len(&sg[i]) = period_len;
+ dma_addr += period_len;
}
- d40d->txd.flags = dma_flags;
+ sg[periods].offset = 0;
+ sg[periods].length = 0;
+ sg[periods].page_link =
+ ((unsigned long)sg | 0x01) & ~0x02;
- dma_async_tx_descriptor_init(&d40d->txd, chan);
+ txd = d40_prep_sg(chan, sg, sg, periods, direction,
+ DMA_PREP_INTERRUPT);
- d40d->txd.tx_submit = d40_tx_submit;
+ kfree(sg);
- spin_unlock_irqrestore(&d40c->lock, flags);
- return &d40d->txd;
-
-err:
- if (d40d)
- d40_desc_free(d40c, d40d);
- spin_unlock_irqrestore(&d40c->lock, flags);
- return NULL;
+ return txd;
}
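dma40_prep_dma_cyclic() above splits the buffer into buf_len/period_len periods and chains the last scatterlist entry back to the first, which d40_prep_sg() detects to set desc->cyclic. A hedged sketch of how a client (an audio driver, say) might use the hook as registered in d40_ops_init() below; the callback and buffer parameters are hypothetical:

        #include <linux/dmaengine.h>

        /* Hypothetical client: circular buffer with one interrupt per period. */
        static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
                                        size_t buf_len, size_t period_len,
                                        void (*period_cb)(void *), void *cb_data)
        {
                struct dma_async_tx_descriptor *txd;

                txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
                                                           period_len, DMA_TO_DEVICE);
                if (!txd)
                        return -ENOMEM;

                txd->callback = period_cb;      /* runs from the tasklet each period */
                txd->callback_param = cb_data;

                txd->tx_submit(txd);
                dma_async_issue_pending(chan);
                return 0;
        }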
static enum dma_status d40_tx_status(struct dma_chan *chan,
@@ -2219,9 +2121,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
int ret;
if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Cannot read status of unallocated channel\n",
- __func__);
+ chan_err(d40c, "Cannot read status of unallocated channel\n");
return -EINVAL;
}
@@ -2245,8 +2145,7 @@ static void d40_issue_pending(struct dma_chan *chan)
unsigned long flags;
if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Channel is not allocated!\n", __func__);
+ chan_err(d40c, "Channel is not allocated!\n");
return;
}
@@ -2339,7 +2238,7 @@ static void d40_set_runtime_config(struct dma_chan *chan,
return;
}
- if (d40c->log_num != D40_PHY_CHAN) {
+ if (chan_is_logical(d40c)) {
if (config_maxburst >= 16)
psize = STEDMA40_PSIZE_LOG_16;
else if (config_maxburst >= 8)
@@ -2372,7 +2271,7 @@ static void d40_set_runtime_config(struct dma_chan *chan,
cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
/* Fill in register values */
- if (d40c->log_num != D40_PHY_CHAN)
+ if (chan_is_logical(d40c))
d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
else
d40_phy_cfg(cfg, &d40c->src_def_cfg,
@@ -2393,25 +2292,20 @@ static void d40_set_runtime_config(struct dma_chan *chan,
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
- unsigned long flags;
struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
if (d40c->phy_chan == NULL) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Channel is not allocated!\n", __func__);
+ chan_err(d40c, "Channel is not allocated!\n");
return -EINVAL;
}
switch (cmd) {
case DMA_TERMINATE_ALL:
- spin_lock_irqsave(&d40c->lock, flags);
- d40_term_all(d40c);
- spin_unlock_irqrestore(&d40c->lock, flags);
- return 0;
+ return d40_terminate_all(d40c);
case DMA_PAUSE:
- return d40_pause(chan);
+ return d40_pause(d40c);
case DMA_RESUME:
- return d40_resume(chan);
+ return d40_resume(d40c);
case DMA_SLAVE_CONFIG:
d40_set_runtime_config(chan,
(struct dma_slave_config *) arg);
@@ -2456,6 +2350,35 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
}
}
+static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
+{
+ if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
+ dev->device_prep_slave_sg = d40_prep_slave_sg;
+
+ if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
+ dev->device_prep_dma_memcpy = d40_prep_memcpy;
+
+ /*
+ * This controller can only access addresses at even
+ * 32-bit boundaries, i.e. 2^2.
+ */
+ dev->copy_align = 2;
+ }
+
+ if (dma_has_cap(DMA_SG, dev->cap_mask))
+ dev->device_prep_dma_sg = d40_prep_memcpy_sg;
+
+ if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
+ dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
+
+ dev->device_alloc_chan_resources = d40_alloc_chan_resources;
+ dev->device_free_chan_resources = d40_free_chan_resources;
+ dev->device_issue_pending = d40_issue_pending;
+ dev->device_tx_status = d40_tx_status;
+ dev->device_control = d40_control;
+ dev->dev = base->dev;
+}
+
static int __init d40_dmaengine_init(struct d40_base *base,
int num_reserved_chans)
{
@@ -2466,23 +2389,14 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_slave.cap_mask);
dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
- base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
- base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
- base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
- base->dma_slave.device_prep_dma_sg = d40_prep_sg;
- base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
- base->dma_slave.device_tx_status = d40_tx_status;
- base->dma_slave.device_issue_pending = d40_issue_pending;
- base->dma_slave.device_control = d40_control;
- base->dma_slave.dev = base->dev;
+ d40_ops_init(base, &base->dma_slave);
err = dma_async_device_register(&base->dma_slave);
if (err) {
- dev_err(base->dev,
- "[%s] Failed to register slave channels\n",
- __func__);
+ d40_err(base->dev, "Failed to register slave channels\n");
goto failure1;
}
@@ -2491,29 +2405,15 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_memcpy.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
- dma_cap_set(DMA_SG, base->dma_slave.cap_mask);
-
- base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
- base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
- base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
- base->dma_slave.device_prep_dma_sg = d40_prep_sg;
- base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
- base->dma_memcpy.device_tx_status = d40_tx_status;
- base->dma_memcpy.device_issue_pending = d40_issue_pending;
- base->dma_memcpy.device_control = d40_control;
- base->dma_memcpy.dev = base->dev;
- /*
- * This controller can only access address at even
- * 32bit boundaries, i.e. 2^2
- */
- base->dma_memcpy.copy_align = 2;
+ dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
+
+ d40_ops_init(base, &base->dma_memcpy);
err = dma_async_device_register(&base->dma_memcpy);
if (err) {
- dev_err(base->dev,
- "[%s] Failed to regsiter memcpy only channels\n",
- __func__);
+ d40_err(base->dev,
+ "Failed to regsiter memcpy only channels\n");
goto failure2;
}
@@ -2523,24 +2423,15 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_both.cap_mask);
dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
- dma_cap_set(DMA_SG, base->dma_slave.cap_mask);
-
- base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
- base->dma_both.device_free_chan_resources = d40_free_chan_resources;
- base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
- base->dma_slave.device_prep_dma_sg = d40_prep_sg;
- base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
- base->dma_both.device_tx_status = d40_tx_status;
- base->dma_both.device_issue_pending = d40_issue_pending;
- base->dma_both.device_control = d40_control;
- base->dma_both.dev = base->dev;
- base->dma_both.copy_align = 2;
+ dma_cap_set(DMA_SG, base->dma_both.cap_mask);
+ dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
+
+ d40_ops_init(base, &base->dma_both);
err = dma_async_device_register(&base->dma_both);
if (err) {
- dev_err(base->dev,
- "[%s] Failed to register logical and physical capable channels\n",
- __func__);
+ d40_err(base->dev,
+ "Failed to register logical and physical capable channels\n");
goto failure3;
}
return 0;
@@ -2616,9 +2507,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
/*
* D40_DREG_PERIPHID2 Depends on HW revision:
- * MOP500/HREF ED has 0x0008,
+ * DB8500ed has 0x0008,
* ? has 0x0018,
- * HREF V1 has 0x0028
+ * DB8500v1 has 0x0028
+ * DB8500v2 has 0x0038
*/
{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
@@ -2642,8 +2534,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "[%s] No matching clock found\n",
- __func__);
+ d40_err(&pdev->dev, "No matching clock found\n");
goto failure;
}
@@ -2666,9 +2557,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
if (dma_id_regs[i].val !=
readl(virtbase + dma_id_regs[i].reg)) {
- dev_err(&pdev->dev,
- "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
- __func__,
+ d40_err(&pdev->dev,
+ "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
dma_id_regs[i].val,
dma_id_regs[i].reg,
readl(virtbase + dma_id_regs[i].reg));
@@ -2681,9 +2571,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
D40_HW_DESIGNER) {
- dev_err(&pdev->dev,
- "[%s] Unknown designer! Got %x wanted %x\n",
- __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
+ d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
+ val & D40_DREG_PERIPHID2_DESIGNER_MASK,
D40_HW_DESIGNER);
goto failure;
}
@@ -2713,7 +2602,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
sizeof(struct d40_chan), GFP_KERNEL);
if (base == NULL) {
- dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
+ d40_err(&pdev->dev, "Out of memory\n");
goto failure;
}
@@ -2860,6 +2749,7 @@ static void __init d40_hw_init(struct d40_base *base)
static int __init d40_lcla_allocate(struct d40_base *base)
{
+ struct d40_lcla_pool *pool = &base->lcla_pool;
unsigned long *page_list;
int i, j;
int ret = 0;
@@ -2885,9 +2775,8 @@ static int __init d40_lcla_allocate(struct d40_base *base)
base->lcla_pool.pages);
if (!page_list[i]) {
- dev_err(base->dev,
- "[%s] Failed to allocate %d pages.\n",
- __func__, base->lcla_pool.pages);
+ d40_err(base->dev, "Failed to allocate %d pages.\n",
+ base->lcla_pool.pages);
for (j = 0; j < i; j++)
free_pages(page_list[j], base->lcla_pool.pages);
@@ -2925,6 +2814,15 @@ static int __init d40_lcla_allocate(struct d40_base *base)
LCLA_ALIGNMENT);
}
+ pool->dma_addr = dma_map_single(base->dev, pool->base,
+ SZ_1K * base->num_phy_chans,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(base->dev, pool->dma_addr)) {
+ pool->dma_addr = 0;
+ ret = -ENOMEM;
+ goto failure;
+ }
+
writel(virt_to_phys(base->lcla_pool.base),
base->virtbase + D40_DREG_LCLA);
failure:
@@ -2957,9 +2855,7 @@ static int __init d40_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
if (!res) {
ret = -ENOENT;
- dev_err(&pdev->dev,
- "[%s] No \"lcpa\" memory resource\n",
- __func__);
+ d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
goto failure;
}
base->lcpa_size = resource_size(res);
@@ -2968,9 +2864,9 @@ static int __init d40_probe(struct platform_device *pdev)
if (request_mem_region(res->start, resource_size(res),
D40_NAME " I/O lcpa") == NULL) {
ret = -EBUSY;
- dev_err(&pdev->dev,
- "[%s] Failed to request LCPA region 0x%x-0x%x\n",
- __func__, res->start, res->end);
+ d40_err(&pdev->dev,
+ "Failed to request LCPA region 0x%x-0x%x\n",
+ res->start, res->end);
goto failure;
}
@@ -2986,16 +2882,13 @@ static int __init d40_probe(struct platform_device *pdev)
base->lcpa_base = ioremap(res->start, resource_size(res));
if (!base->lcpa_base) {
ret = -ENOMEM;
- dev_err(&pdev->dev,
- "[%s] Failed to ioremap LCPA region\n",
- __func__);
+ d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
goto failure;
}
ret = d40_lcla_allocate(base);
if (ret) {
- dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
- __func__);
+ d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
goto failure;
}
@@ -3004,9 +2897,8 @@ static int __init d40_probe(struct platform_device *pdev)
base->irq = platform_get_irq(pdev, 0);
ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
-
if (ret) {
- dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
+ d40_err(&pdev->dev, "No IRQ defined\n");
goto failure;
}
@@ -3025,6 +2917,12 @@ failure:
kmem_cache_destroy(base->desc_slab);
if (base->virtbase)
iounmap(base->virtbase);
+
+ if (base->lcla_pool.dma_addr)
+ dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
+ SZ_1K * base->num_phy_chans,
+ DMA_TO_DEVICE);
+
if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
free_pages((unsigned long)base->lcla_pool.base,
base->lcla_pool.pages);
@@ -3049,7 +2947,7 @@ failure:
kfree(base);
}
- dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
+ d40_err(&pdev->dev, "probe failed\n");
return ret;
}
@@ -3060,7 +2958,7 @@ static struct platform_driver d40_driver = {
},
};
-int __init stedma40_init(void)
+static int __init stedma40_init(void)
{
return platform_driver_probe(&d40_driver, d40_probe);
}
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 0b096a38322..cad9e1daedf 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -125,13 +125,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
static int d40_phy_fill_lli(struct d40_phy_lli *lli,
dma_addr_t data,
u32 data_size,
- int psize,
dma_addr_t next_lli,
u32 reg_cfg,
- bool term_int,
- u32 data_width,
- bool is_device)
+ struct stedma40_half_channel_info *info,
+ unsigned int flags)
{
+ bool addr_inc = flags & LLI_ADDR_INC;
+ bool term_int = flags & LLI_TERM_INT;
+ unsigned int data_width = info->data_width;
+ int psize = info->psize;
int num_elems;
if (psize == STEDMA40_PSIZE_PHY_1)
@@ -154,7 +156,7 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli,
* Distance to next element sized entry.
* Usually the size of the element unless you want gaps.
*/
- if (!is_device)
+ if (addr_inc)
lli->reg_elt |= (0x1 << data_width) <<
D40_SREG_ELEM_PHY_EIDX_POS;
@@ -198,47 +200,51 @@ static int d40_seg_size(int size, int data_width1, int data_width2)
return seg_max;
}
-struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
- dma_addr_t addr,
- u32 size,
- int psize,
- dma_addr_t lli_phys,
- u32 reg_cfg,
- bool term_int,
- u32 data_width1,
- u32 data_width2,
- bool is_device)
+static struct d40_phy_lli *
+d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
+ dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg,
+ struct stedma40_half_channel_info *info,
+ struct stedma40_half_channel_info *otherinfo,
+ unsigned long flags)
{
+ bool lastlink = flags & LLI_LAST_LINK;
+ bool addr_inc = flags & LLI_ADDR_INC;
+ bool term_int = flags & LLI_TERM_INT;
+ bool cyclic = flags & LLI_CYCLIC;
int err;
dma_addr_t next = lli_phys;
int size_rest = size;
int size_seg = 0;
+ /*
+ * This piece may be split up based on d40_seg_size(); we only want the
+ * term int on the last part.
+ */
+ if (term_int)
+ flags &= ~LLI_TERM_INT;
+
do {
- size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+ size_seg = d40_seg_size(size_rest, info->data_width,
+ otherinfo->data_width);
size_rest -= size_seg;
- if (term_int && size_rest == 0)
- next = 0;
+ if (size_rest == 0 && term_int)
+ flags |= LLI_TERM_INT;
+
+ if (size_rest == 0 && lastlink)
+ next = cyclic ? first_phys : 0;
else
next = ALIGN(next + sizeof(struct d40_phy_lli),
D40_LLI_ALIGN);
- err = d40_phy_fill_lli(lli,
- addr,
- size_seg,
- psize,
- next,
- reg_cfg,
- !next,
- data_width1,
- is_device);
+ err = d40_phy_fill_lli(lli, addr, size_seg, next,
+ reg_cfg, info, flags);
if (err)
goto err;
lli++;
- if (!is_device)
+ if (addr_inc)
addr += size_seg;
} while (size_rest);
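The boolean parameter soup of the old d40_phy_buf_to_lli() is replaced by a flags word. A sketch of how a prep path composes those flags for a cyclic, memory-to-device transfer; the flag names come from this patch (their values live in ste_dma40_ll.h) and the helper itself is hypothetical:

        /* Hypothetical helper: flags handed down to d40_phy_sg_to_lli(). */
        static unsigned long example_phy_lli_flags(bool cyclic, bool mem_side)
        {
                unsigned long flags = 0;

                if (cyclic)             /* wrap the chain and interrupt per period */
                        flags |= LLI_CYCLIC | LLI_TERM_INT;
                if (mem_side)           /* only the memory address increments */
                        flags |= LLI_ADDR_INC;

                return flags;
        }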
@@ -254,39 +260,35 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
struct d40_phy_lli *lli_sg,
dma_addr_t lli_phys,
u32 reg_cfg,
- u32 data_width1,
- u32 data_width2,
- int psize)
+ struct stedma40_half_channel_info *info,
+ struct stedma40_half_channel_info *otherinfo,
+ unsigned long flags)
{
int total_size = 0;
int i;
struct scatterlist *current_sg = sg;
- dma_addr_t dst;
struct d40_phy_lli *lli = lli_sg;
dma_addr_t l_phys = lli_phys;
+ if (!target)
+ flags |= LLI_ADDR_INC;
+
for_each_sg(sg, current_sg, sg_len, i) {
+ dma_addr_t sg_addr = sg_dma_address(current_sg);
+ unsigned int len = sg_dma_len(current_sg);
+ dma_addr_t dst = target ?: sg_addr;
total_size += sg_dma_len(current_sg);
- if (target)
- dst = target;
- else
- dst = sg_phys(current_sg);
+ if (i == sg_len - 1)
+ flags |= LLI_TERM_INT | LLI_LAST_LINK;
l_phys = ALIGN(lli_phys + (lli - lli_sg) *
sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
- lli = d40_phy_buf_to_lli(lli,
- dst,
- sg_dma_len(current_sg),
- psize,
- l_phys,
- reg_cfg,
- sg_len - 1 == i,
- data_width1,
- data_width2,
- target == dst);
+ lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys,
+ reg_cfg, info, otherinfo, flags);
+
if (lli == NULL)
return -EINVAL;
}
@@ -295,45 +297,22 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
}
-void d40_phy_lli_write(void __iomem *virtbase,
- u32 phy_chan_num,
- struct d40_phy_lli *lli_dst,
- struct d40_phy_lli *lli_src)
-{
-
- writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG);
- writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
- writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR);
- writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK);
-
- writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG);
- writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
- writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR);
- writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE +
- phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK);
-
-}
-
/* DMA logical lli operations */
static void d40_log_lli_link(struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next)
+ int next, unsigned int flags)
{
+ bool interrupt = flags & LLI_TERM_INT;
u32 slos = 0;
u32 dlos = 0;
if (next != -EINVAL) {
slos = next * 2;
dlos = next * 2 + 1;
- } else {
+ }
+
+ if (interrupt) {
lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
}
@@ -348,9 +327,9 @@ static void d40_log_lli_link(struct d40_log_lli *lli_dst,
void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next)
+ int next, unsigned int flags)
{
- d40_log_lli_link(lli_dst, lli_src, next);
+ d40_log_lli_link(lli_dst, lli_src, next, flags);
writel(lli_src->lcsp02, &lcpa[0].lcsp0);
writel(lli_src->lcsp13, &lcpa[0].lcsp1);
@@ -361,9 +340,9 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next)
+ int next, unsigned int flags)
{
- d40_log_lli_link(lli_dst, lli_src, next);
+ d40_log_lli_link(lli_dst, lli_src, next, flags);
writel(lli_src->lcsp02, &lcla[0].lcsp02);
writel(lli_src->lcsp13, &lcla[0].lcsp13);
@@ -375,8 +354,10 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
dma_addr_t data, u32 data_size,
u32 reg_cfg,
u32 data_width,
- bool addr_inc)
+ unsigned int flags)
{
+ bool addr_inc = flags & LLI_ADDR_INC;
+
lli->lcsp13 = reg_cfg;
/* The number of elements to transfer */
@@ -395,67 +376,15 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
}
-int d40_log_sg_to_dev(struct scatterlist *sg,
- int sg_len,
- struct d40_log_lli_bidir *lli,
- struct d40_def_lcsp *lcsp,
- u32 src_data_width,
- u32 dst_data_width,
- enum dma_data_direction direction,
- dma_addr_t dev_addr)
-{
- int total_size = 0;
- struct scatterlist *current_sg = sg;
- int i;
- struct d40_log_lli *lli_src = lli->src;
- struct d40_log_lli *lli_dst = lli->dst;
-
- for_each_sg(sg, current_sg, sg_len, i) {
- total_size += sg_dma_len(current_sg);
-
- if (direction == DMA_TO_DEVICE) {
- lli_src =
- d40_log_buf_to_lli(lli_src,
- sg_phys(current_sg),
- sg_dma_len(current_sg),
- lcsp->lcsp1, src_data_width,
- dst_data_width,
- true);
- lli_dst =
- d40_log_buf_to_lli(lli_dst,
- dev_addr,
- sg_dma_len(current_sg),
- lcsp->lcsp3, dst_data_width,
- src_data_width,
- false);
- } else {
- lli_dst =
- d40_log_buf_to_lli(lli_dst,
- sg_phys(current_sg),
- sg_dma_len(current_sg),
- lcsp->lcsp3, dst_data_width,
- src_data_width,
- true);
- lli_src =
- d40_log_buf_to_lli(lli_src,
- dev_addr,
- sg_dma_len(current_sg),
- lcsp->lcsp1, src_data_width,
- dst_data_width,
- false);
- }
- }
- return total_size;
-}
-
-struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
dma_addr_t addr,
int size,
u32 lcsp13, /* src or dst*/
u32 data_width1,
u32 data_width2,
- bool addr_inc)
+ unsigned int flags)
{
+ bool addr_inc = flags & LLI_ADDR_INC;
struct d40_log_lli *lli = lli_sg;
int size_rest = size;
int size_seg = 0;
@@ -468,7 +397,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
addr,
size_seg,
lcsp13, data_width1,
- addr_inc);
+ flags);
if (addr_inc)
addr += size_seg;
lli++;
@@ -479,6 +408,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
int d40_log_sg_to_lli(struct scatterlist *sg,
int sg_len,
+ dma_addr_t dev_addr,
struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/
u32 data_width1, u32 data_width2)
@@ -487,14 +417,24 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
struct scatterlist *current_sg = sg;
int i;
struct d40_log_lli *lli = lli_sg;
+ unsigned long flags = 0;
+
+ if (!dev_addr)
+ flags |= LLI_ADDR_INC;
for_each_sg(sg, current_sg, sg_len, i) {
+ dma_addr_t sg_addr = sg_dma_address(current_sg);
+ unsigned int len = sg_dma_len(current_sg);
+ dma_addr_t addr = dev_addr ?: sg_addr;
+
total_size += sg_dma_len(current_sg);
- lli = d40_log_buf_to_lli(lli,
- sg_phys(current_sg),
- sg_dma_len(current_sg),
+
+ lli = d40_log_buf_to_lli(lli, addr, len,
lcsp13,
- data_width1, data_width2, true);
+ data_width1,
+ data_width2,
+ flags);
}
+
return total_size;
}
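The rework above folds the old bool/psize arguments into a single flags word and lets the presence of a device address decide whether the buffer pointer advances. A minimal sketch of how a caller might derive those flags, assuming only the d40_lli_flags values added in ste_dma40_ll.h below (the helper itself is illustrative, not part of this patch):

	/* Illustrative helper, not from this patch: build LLI flags for one sg entry. */
	static unsigned long d40_example_lli_flags(bool mem_side, bool last, bool cyclic)
	{
		unsigned long flags = 0;

		if (mem_side)	/* memory addresses advance; a device FIFO does not */
			flags |= LLI_ADDR_INC;
		if (last)	/* interrupt and terminate (or wrap) on the last link */
			flags |= LLI_TERM_INT | LLI_LAST_LINK;
		if (cyclic)	/* the last link points back at the first LLI */
			flags |= LLI_CYCLIC;

		return flags;
	}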
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9cc43495bea..195ee65ee7f 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -163,6 +163,22 @@
#define D40_DREG_LCEIS1 0x0B4
#define D40_DREG_LCEIS2 0x0B8
#define D40_DREG_LCEIS3 0x0BC
+#define D40_DREG_PSEG1 0x110
+#define D40_DREG_PSEG2 0x114
+#define D40_DREG_PSEG3 0x118
+#define D40_DREG_PSEG4 0x11C
+#define D40_DREG_PCEG1 0x120
+#define D40_DREG_PCEG2 0x124
+#define D40_DREG_PCEG3 0x128
+#define D40_DREG_PCEG4 0x12C
+#define D40_DREG_RSEG1 0x130
+#define D40_DREG_RSEG2 0x134
+#define D40_DREG_RSEG3 0x138
+#define D40_DREG_RSEG4 0x13C
+#define D40_DREG_RCEG1 0x140
+#define D40_DREG_RCEG2 0x144
+#define D40_DREG_RCEG3 0x148
+#define D40_DREG_RCEG4 0x14C
#define D40_DREG_STFU 0xFC8
#define D40_DREG_ICFG 0xFCC
#define D40_DREG_PERIPHID0 0xFE0
@@ -277,6 +293,13 @@ struct d40_def_lcsp {
/* Physical channels */
+enum d40_lli_flags {
+ LLI_ADDR_INC = 1 << 0,
+ LLI_TERM_INT = 1 << 1,
+ LLI_CYCLIC = 1 << 2,
+ LLI_LAST_LINK = 1 << 3,
+};
+
void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
u32 *src_cfg,
u32 *dst_cfg,
@@ -292,46 +315,15 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
struct d40_phy_lli *lli,
dma_addr_t lli_phys,
u32 reg_cfg,
- u32 data_width1,
- u32 data_width2,
- int psize);
-
-struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
- dma_addr_t data,
- u32 data_size,
- int psize,
- dma_addr_t next_lli,
- u32 reg_cfg,
- bool term_int,
- u32 data_width1,
- u32 data_width2,
- bool is_device);
-
-void d40_phy_lli_write(void __iomem *virtbase,
- u32 phy_chan_num,
- struct d40_phy_lli *lli_dst,
- struct d40_phy_lli *lli_src);
+ struct stedma40_half_channel_info *info,
+ struct stedma40_half_channel_info *otherinfo,
+ unsigned long flags);
/* Logical channels */
-struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
- dma_addr_t addr,
- int size,
- u32 lcsp13, /* src or dst*/
- u32 data_width1, u32 data_width2,
- bool addr_inc);
-
-int d40_log_sg_to_dev(struct scatterlist *sg,
- int sg_len,
- struct d40_log_lli_bidir *lli,
- struct d40_def_lcsp *lcsp,
- u32 src_data_width,
- u32 dst_data_width,
- enum dma_data_direction direction,
- dma_addr_t dev_addr);
-
int d40_log_sg_to_lli(struct scatterlist *sg,
int sg_len,
+ dma_addr_t dev_addr,
struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/
u32 data_width1, u32 data_width2);
@@ -339,11 +331,11 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next);
+ int next, unsigned int flags);
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
- int next);
+ int next, unsigned int flags);
#endif /* STE_DMA40_LLI_H */
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 3b88a4e7c98..ea8705b64d7 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -27,6 +27,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/timb_dma.h>
@@ -684,7 +685,7 @@ static irqreturn_t td_irq(int irq, void *devid)
static int __devinit td_probe(struct platform_device *pdev)
{
- struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_dma_platform_data *pdata = mfd_get_data(pdev);
struct timb_dma *td;
struct resource *iomem;
int irq;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 664660e5633..5a182f30bf7 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -411,4 +411,9 @@ config GPIO_JANZ_TTL
This driver provides support for driving the pins in output
mode only. Input mode is not supported.
+config AB8500_GPIO
+ bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions"
+ depends on AB8500_CORE
+ help
+ Select this to enable the AB8500 IC GPIO driver
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 3351cf87b0e..becef595435 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
obj-$(CONFIG_GPIO_SX150X) += sx150x.o
obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o
obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o
+obj-$(CONFIG_AB8500_GPIO) += ab8500-gpio.o
diff --git a/drivers/gpio/ab8500-gpio.c b/drivers/gpio/ab8500-gpio.c
new file mode 100644
index 00000000000..e7b834d054b
--- /dev/null
+++ b/drivers/gpio/ab8500-gpio.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: BIBEK BASU <bibek.basu@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500/gpio.h>
+
+/*
+ * GPIO registers offset
+ * Bank: 0x10
+ */
+#define AB8500_GPIO_SEL1_REG 0x00
+#define AB8500_GPIO_SEL2_REG 0x01
+#define AB8500_GPIO_SEL3_REG 0x02
+#define AB8500_GPIO_SEL4_REG 0x03
+#define AB8500_GPIO_SEL5_REG 0x04
+#define AB8500_GPIO_SEL6_REG 0x05
+
+#define AB8500_GPIO_DIR1_REG 0x10
+#define AB8500_GPIO_DIR2_REG 0x11
+#define AB8500_GPIO_DIR3_REG 0x12
+#define AB8500_GPIO_DIR4_REG 0x13
+#define AB8500_GPIO_DIR5_REG 0x14
+#define AB8500_GPIO_DIR6_REG 0x15
+
+#define AB8500_GPIO_OUT1_REG 0x20
+#define AB8500_GPIO_OUT2_REG 0x21
+#define AB8500_GPIO_OUT3_REG 0x22
+#define AB8500_GPIO_OUT4_REG 0x23
+#define AB8500_GPIO_OUT5_REG 0x24
+#define AB8500_GPIO_OUT6_REG 0x25
+
+#define AB8500_GPIO_PUD1_REG 0x30
+#define AB8500_GPIO_PUD2_REG 0x31
+#define AB8500_GPIO_PUD3_REG 0x32
+#define AB8500_GPIO_PUD4_REG 0x33
+#define AB8500_GPIO_PUD5_REG 0x34
+#define AB8500_GPIO_PUD6_REG 0x35
+
+#define AB8500_GPIO_IN1_REG 0x40
+#define AB8500_GPIO_IN2_REG 0x41
+#define AB8500_GPIO_IN3_REG 0x42
+#define AB8500_GPIO_IN4_REG 0x43
+#define AB8500_GPIO_IN5_REG 0x44
+#define AB8500_GPIO_IN6_REG 0x45
+#define AB8500_GPIO_ALTFUN_REG 0x45
+#define ALTFUN_REG_INDEX 6
+#define AB8500_NUM_GPIO 42
+#define AB8500_NUM_VIR_GPIO_IRQ 16
+
+enum ab8500_gpio_action {
+ NONE,
+ STARTUP,
+ SHUTDOWN,
+ MASK,
+ UNMASK
+};
+
+struct ab8500_gpio {
+ struct gpio_chip chip;
+ struct ab8500 *parent;
+ struct device *dev;
+ struct mutex lock;
+ u32 irq_base;
+ enum ab8500_gpio_action irq_action;
+ u16 rising;
+ u16 falling;
+};
+/**
+ * to_ab8500_gpio() - get the pointer to ab8500_gpio
+ * @chip: Member of the structure ab8500_gpio
+ */
+static inline struct ab8500_gpio *to_ab8500_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct ab8500_gpio, chip);
+}
+
+static int ab8500_gpio_set_bits(struct gpio_chip *chip, u8 reg,
+ unsigned offset, int val)
+{
+ struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
+ u8 pos = offset % 8;
+ int ret;
+
+ reg = reg + (offset / 8);
+ ret = abx500_mask_and_set_register_interruptible(ab8500_gpio->dev,
+ AB8500_MISC, reg, 1 << pos, val << pos);
+ if (ret < 0)
+ dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
+ return ret;
+}
+/**
+ * ab8500_gpio_get() - Get the particular GPIO value
+ * @chip: Gpio device
+ * @offset: GPIO number to read
+ */
+static int ab8500_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
+ u8 mask = 1 << (offset % 8);
+ u8 reg = AB8500_GPIO_OUT1_REG + (offset / 8);
+ int ret;
+ u8 data;
+ ret = abx500_get_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
+ reg, &data);
+ if (ret < 0) {
+ dev_err(ab8500_gpio->dev, "%s read failed\n", __func__);
+ return ret;
+ }
+ return (data & mask) >> (offset % 8);
+}
+
+static void ab8500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+ struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
+ int ret;
+ /* Write the data */
+ ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, !!val);
+ if (ret < 0)
+ dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
+}
+
+static int ab8500_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int val)
+{
+ int ret;
+ /* set direction as output */
+ ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 1);
+ if (ret < 0)
+ return ret;
+ /* disable pull down */
+ ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG, offset, 1);
+ if (ret < 0)
+ return ret;
+ /* set the output as 1 or 0 */
+ return ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
+
+}
+
+static int ab8500_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ /* set the register as input */
+ return ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 0);
+}
+
+static int ab8500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ /*
+ * Only some GPIOs are interrupt capable, and they are
+ * organized in discontiguous clusters:
+ *
+ * GPIO6 to GPIO13
+ * GPIO24 and GPIO25
+ * GPIO36 to GPIO41
+ */
+ static struct ab8500_gpio_irq_cluster {
+ int start;
+ int end;
+ } clusters[] = {
+ {.start = 6, .end = 13},
+ {.start = 24, .end = 25},
+ {.start = 36, .end = 41},
+ };
+ struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
+ int base = ab8500_gpio->irq_base;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clusters); i++) {
+ struct ab8500_gpio_irq_cluster *cluster = &clusters[i];
+
+ if (offset >= cluster->start && offset <= cluster->end)
+ return base + offset - cluster->start;
+
+ /* Advance by the number of gpios in this cluster */
+ base += cluster->end - cluster->start + 1;
+ }
+
+ return -EINVAL;
+}
+
+static struct gpio_chip ab8500gpio_chip = {
+ .label = "ab8500_gpio",
+ .owner = THIS_MODULE,
+ .direction_input = ab8500_gpio_direction_input,
+ .get = ab8500_gpio_get,
+ .direction_output = ab8500_gpio_direction_output,
+ .set = ab8500_gpio_set,
+ .to_irq = ab8500_gpio_to_irq,
+};
+
+static unsigned int irq_to_rising(unsigned int irq)
+{
+ struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ int offset = irq - ab8500_gpio->irq_base;
+ int new_irq = offset + AB8500_INT_GPIO6R
+ + ab8500_gpio->parent->irq_base;
+ return new_irq;
+}
+
+static unsigned int irq_to_falling(unsigned int irq)
+{
+ struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ int offset = irq - ab8500_gpio->irq_base;
+ int new_irq = offset + AB8500_INT_GPIO6F
+ + ab8500_gpio->parent->irq_base;
+ return new_irq;
+
+}
+
+static unsigned int rising_to_irq(unsigned int irq, void *dev)
+{
+ struct ab8500_gpio *ab8500_gpio = dev;
+ int offset = irq - AB8500_INT_GPIO6R
+ - ab8500_gpio->parent->irq_base ;
+ int new_irq = offset + ab8500_gpio->irq_base;
+ return new_irq;
+}
+
+static unsigned int falling_to_irq(unsigned int irq, void *dev)
+{
+ struct ab8500_gpio *ab8500_gpio = dev;
+ int offset = irq - AB8500_INT_GPIO6F
+ - ab8500_gpio->parent->irq_base ;
+ int new_irq = offset + ab8500_gpio->irq_base;
+ return new_irq;
+
+}
+
+/*
+ * IRQ handler
+ */
+
+static irqreturn_t handle_rising(int irq, void *dev)
+{
+
+ handle_nested_irq(rising_to_irq(irq, dev));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_falling(int irq, void *dev)
+{
+
+ handle_nested_irq(falling_to_irq(irq, dev));
+ return IRQ_HANDLED;
+}
+
+static void ab8500_gpio_irq_lock(unsigned int irq)
+{
+ struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ mutex_lock(&ab8500_gpio->lock);
+}
+
+static void ab8500_gpio_irq_sync_unlock(unsigned int irq)
+{
+ struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ int offset = irq - ab8500_gpio->irq_base;
+ bool rising = ab8500_gpio->rising & BIT(offset);
+ bool falling = ab8500_gpio->falling & BIT(offset);
+ int ret;
+
+ switch (ab8500_gpio->irq_action) {
+ case STARTUP:
+ if (rising)
+ ret = request_threaded_irq(irq_to_rising(irq),
+ NULL, handle_rising,
+ IRQF_TRIGGER_RISING,
+ "ab8500-gpio-r", ab8500_gpio);
+ if (falling)
+ ret = request_threaded_irq(irq_to_falling(irq),
+ NULL, handle_falling,
+ IRQF_TRIGGER_FALLING,
+ "ab8500-gpio-f", ab8500_gpio);
+ break;
+ case SHUTDOWN:
+ if (rising)
+ free_irq(irq_to_rising(irq), ab8500_gpio);
+ if (falling)
+ free_irq(irq_to_falling(irq), ab8500_gpio);
+ break;
+ case MASK:
+ if (rising)
+ disable_irq(irq_to_rising(irq));
+ if (falling)
+ disable_irq(irq_to_falling(irq));
+ break;
+ case UNMASK:
+ if (rising)
+ enable_irq(irq_to_rising(irq));
+ if (falling)
+ enable_irq(irq_to_falling(irq));
+ break;
+ case NONE:
+ break;
+ }
+ ab8500_gpio->irq_action = NONE;
+ ab8500_gpio->rising &= ~(BIT(offset));
+ ab8500_gpio->falling &= ~(BIT(offset));
+ mutex_unlock(&ab8500_gpio->lock);
+}
+
+
+static void ab8500_gpio_irq_mask(unsigned int irq)
+{
+ struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ ab8500_gpio->irq_action = MASK;
+}
+
+static void ab8500_gpio_irq_unmask(unsigned int irq)
+{
+ struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ ab8500_gpio->irq_action = UNMASK;
+}
+
+static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ int offset = irq - ab8500_gpio->irq_base;
+
+ if (type == IRQ_TYPE_EDGE_BOTH) {
+ ab8500_gpio->rising = BIT(offset);
+ ab8500_gpio->falling = BIT(offset);
+ } else if (type == IRQ_TYPE_EDGE_RISING) {
+ ab8500_gpio->rising = BIT(offset);
+ } else {
+ ab8500_gpio->falling = BIT(offset);
+ }
+ return 0;
+}
+
+static unsigned int ab8500_gpio_irq_startup(unsigned int irq)
+{
+ struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ ab8500_gpio->irq_action = STARTUP;
+ return 0;
+}
+
+static void ab8500_gpio_irq_shutdown(unsigned int irq)
+{
+ struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ ab8500_gpio->irq_action = SHUTDOWN;
+}
+
+static struct irq_chip ab8500_gpio_irq_chip = {
+ .name = "ab8500-gpio",
+ .startup = ab8500_gpio_irq_startup,
+ .shutdown = ab8500_gpio_irq_shutdown,
+ .bus_lock = ab8500_gpio_irq_lock,
+ .bus_sync_unlock = ab8500_gpio_irq_sync_unlock,
+ .mask = ab8500_gpio_irq_mask,
+ .unmask = ab8500_gpio_irq_unmask,
+ .set_type = ab8500_gpio_irq_set_type,
+};
+
+static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio)
+{
+ u32 base = ab8500_gpio->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ ; irq++) {
+ set_irq_chip_data(irq, ab8500_gpio);
+ set_irq_chip_and_handler(irq, &ab8500_gpio_irq_chip,
+ handle_simple_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void ab8500_gpio_irq_remove(struct ab8500_gpio *ab8500_gpio)
+{
+ int base = ab8500_gpio->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ set_irq_chip_and_handler(irq, NULL, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+}
+
+static int __devinit ab8500_gpio_probe(struct platform_device *pdev)
+{
+ struct ab8500_platform_data *ab8500_pdata =
+ dev_get_platdata(pdev->dev.parent);
+ struct ab8500_gpio_platform_data *pdata;
+ struct ab8500_gpio *ab8500_gpio;
+ int ret;
+ int i;
+
+ pdata = ab8500_pdata->gpio;
+ if (!pdata) {
+ dev_err(&pdev->dev, "gpio platform data missing\n");
+ return -ENODEV;
+ }
+
+ ab8500_gpio = kzalloc(sizeof(struct ab8500_gpio), GFP_KERNEL);
+ if (ab8500_gpio == NULL) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+ ab8500_gpio->dev = &pdev->dev;
+ ab8500_gpio->parent = dev_get_drvdata(pdev->dev.parent);
+ ab8500_gpio->chip = ab8500gpio_chip;
+ ab8500_gpio->chip.ngpio = AB8500_NUM_GPIO;
+ ab8500_gpio->chip.dev = &pdev->dev;
+ ab8500_gpio->chip.base = pdata->gpio_base;
+ ab8500_gpio->irq_base = pdata->irq_base;
+ /* initialize the lock */
+ mutex_init(&ab8500_gpio->lock);
+ /*
+ * The AB8500 core will handle and clear the IRQ.
+ * Configure the GPIOs from the config_reg values, which select
+ * whether each pin is used as a GPIO or for its alternate function.
+ */
+ for (i = AB8500_GPIO_SEL1_REG; i <= AB8500_GPIO_SEL6_REG; i++) {
+ ret = abx500_set_register_interruptible(ab8500_gpio->dev,
+ AB8500_MISC, i,
+ pdata->config_reg[i]);
+ if (ret < 0)
+ goto out_free;
+ }
+ ret = abx500_set_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
+ AB8500_GPIO_ALTFUN_REG,
+ pdata->config_reg[ALTFUN_REG_INDEX]);
+ if (ret < 0)
+ goto out_free;
+
+ ret = ab8500_gpio_irq_init(ab8500_gpio);
+ if (ret)
+ goto out_free;
+ ret = gpiochip_add(&ab8500_gpio->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to add gpiochip: %d\n",
+ ret);
+ goto out_rem_irq;
+ }
+ platform_set_drvdata(pdev, ab8500_gpio);
+ return 0;
+
+out_rem_irq:
+ ab8500_gpio_irq_remove(ab8500_gpio);
+out_free:
+ mutex_destroy(&ab8500_gpio->lock);
+ kfree(ab8500_gpio);
+ return ret;
+}
+
+/*
+ * ab8500_gpio_remove() - remove Ab8500-gpio driver
+ * @pdev : Platform device registered
+ */
+static int __devexit ab8500_gpio_remove(struct platform_device *pdev)
+{
+ struct ab8500_gpio *ab8500_gpio = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&ab8500_gpio->chip);
+ if (ret < 0) {
+ dev_err(ab8500_gpio->dev, "unable to remove gpiochip:\
+ %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, NULL);
+ mutex_destroy(&ab8500_gpio->lock);
+ kfree(ab8500_gpio);
+
+ return 0;
+}
+
+static struct platform_driver ab8500_gpio_driver = {
+ .driver = {
+ .name = "ab8500-gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_gpio_probe,
+ .remove = __devexit_p(ab8500_gpio_remove),
+};
+
+static int __init ab8500_gpio_init(void)
+{
+ return platform_driver_register(&ab8500_gpio_driver);
+}
+arch_initcall(ab8500_gpio_init);
+
+static void __exit ab8500_gpio_exit(void)
+{
+ platform_driver_unregister(&ab8500_gpio_driver);
+}
+module_exit(ab8500_gpio_exit);
+
+MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>");
+MODULE_DESCRIPTION("Driver allows to use AB8500 unused pins\
+ to be used as GPIO");
+MODULE_ALIAS("AB8500 GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/janz-ttl.c b/drivers/gpio/janz-ttl.c
index 813ac077e5d..2514fb075f4 100644
--- a/drivers/gpio/janz-ttl.c
+++ b/drivers/gpio/janz-ttl.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
@@ -149,7 +150,7 @@ static int __devinit ttl_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = mfd_get_data(pdev);
if (!pdata) {
dev_err(dev, "no platform data\n");
ret = -ENXIO;
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 2975d22daff..838ddbdf90c 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -232,7 +232,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
-static int pl061_probe(struct amba_device *dev, struct amba_id *id)
+static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
{
struct pl061_platform_data *pdata;
struct pl061_gpio *chip;
diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/rdc321x-gpio.c
index 897e0577e65..a9bda881935 100644
--- a/drivers/gpio/rdc321x-gpio.c
+++ b/drivers/gpio/rdc321x-gpio.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/gpio.h>
#include <linux/mfd/rdc321x.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
struct rdc321x_gpio {
@@ -135,7 +136,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
struct rdc321x_gpio *rdc321x_gpio_dev;
struct rdc321x_gpio_pdata *pdata;
- pdata = platform_get_drvdata(pdev);
+ pdata = mfd_get_data(pdev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -ENODEV;
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index 58c8f30352d..ffcd815b8b8 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/timb_gpio.h>
@@ -228,7 +229,7 @@ static int __devinit timbgpio_probe(struct platform_device *pdev)
struct gpio_chip *gc;
struct timbgpio *tgpio;
struct resource *iomem;
- struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timbgpio_platform_data *pdata = mfd_get_data(pdev);
int irq = platform_get_irq(pdev, 0);
if (!pdata || pdata->nr_pins > 32) {
@@ -319,14 +320,13 @@ err_mem:
static int __devexit timbgpio_remove(struct platform_device *pdev)
{
int err;
- struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
struct timbgpio *tgpio = platform_get_drvdata(pdev);
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int irq = platform_get_irq(pdev, 0);
if (irq >= 0 && tgpio->irq_base > 0) {
int i;
- for (i = 0; i < pdata->nr_pins; i++) {
+ for (i = 0; i < tgpio->gpio.ngpio; i++) {
set_irq_chip(tgpio->irq_base + i, NULL);
set_irq_chip_data(tgpio->irq_base + i, NULL);
}
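Several hunks above (timb_dma, janz-ttl, rdc321x-gpio, timbgpio) switch sub-drivers from pdev->dev.platform_data to mfd_get_data(). For context, a sketch of the parent MFD side that makes this work; the mfd_data field name is assumed from this kernel's MFD core, and the cell contents are invented:

	/* Sketch only: parent MFD driver attaching per-cell data for mfd_get_data(). */
	static struct timb_dma_platform_data timb_dma_pdata = {
		/* ... channel configuration ... */
	};

	static struct mfd_cell timb_cells[] = {
		{
			.name		= "timb-dma",
			.mfd_data	= &timb_dma_pdata,	/* what td_probe() reads back */
		},
	};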
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 4b4b5455b00..be8184dac87 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -166,3 +166,27 @@ config DRM_SAVAGE
help
Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
chipset. If M is selected the module will be called savage.
+
+config DRM_OMAP
+ tristate "OMAP GPU (EXPERIMENTAL)"
+ depends on DRM && !FB_OMAP2
+ select DRM_KMS_HELPER
+ select OMAP2_VRAM
+ select OMAP2_DSS
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ default y
+ help
+ DRM display driver for OMAP2/3/4 based boards.
+
+config DRM_OMAP_NUM_CRTCS
+ int "Number of CRTCs"
+ range 1 10
+ default 1 if ARCH_OMAP2 || ARCH_OMAP3
+ default 2 if ARCH_OMAP4
+ depends on DRM_OMAP
+ help
+ Select the number of video overlays which can be used as framebuffers.
+ The remaining overlays are reserved for video.
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 997c43d0490..dc1a49620e7 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -36,4 +36,5 @@ obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+obj-$(CONFIG_DRM_OMAP) += omap/
obj-y += i2c/
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 271835a7157..7301d5e7fda 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -262,15 +262,12 @@ EXPORT_SYMBOL(drm_init);
void drm_exit(struct drm_driver *driver)
{
- struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
- if (driver->driver_features & DRIVER_MODESET) {
- pci_unregister_driver(&driver->pci_driver);
- } else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- }
+ if (driver->driver_features & DRIVER_USE_PLATFORM_DEVICE)
+ drm_platform_exit(driver);
+ else
+ drm_pci_exit(driver);
DRM_INFO("Module unloaded\n");
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f73ef4390db..95072047396 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -627,6 +627,11 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
value = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
+ if (info->var.transp.length > 0) {
+ u32 mask = (1 << info->var.transp.length) - 1;
+ mask <<= info->var.transp.offset;
+ value |= mask;
+ }
palette[regno] = value;
return 0;
}
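The setcolreg() change above forces the alpha bits of each palette entry whenever the visual carries a transparency channel; a quick check of the mask arithmetic with assumed ARGB8888 values (not taken from the patch):

	/*
	 * Assumed ARGB8888 layout: transp.length = 8, transp.offset = 24, so
	 *	mask = ((1 << 8) - 1) << 24 = 0xff000000
	 * and value |= mask leaves the palette entry fully opaque.
	 */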
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index be9a9c07d15..73bf49360c9 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -54,11 +54,11 @@ int drm_name_info(struct seq_file *m, void *data)
if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
if (master->unique) {
seq_printf(m, "%s %s %s\n",
- dev->driver->platform_device->name,
+ dev->platformdev->name,
dev_name(dev->dev), master->unique);
} else {
seq_printf(m, "%s\n",
- dev->driver->platform_device->name);
+ dev->platformdev->name);
}
} else {
if (master->unique) {
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 47db4df37a6..075024b88f4 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -165,14 +165,15 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
drm_unset_busid(dev, master);
if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
- master->unique_len = 10 + strlen(dev->platformdev->name);
+ master->unique_len = 13 + strlen(dev->platformdev->name);
+ master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
len = snprintf(master->unique, master->unique_len,
- "platform:%s", dev->platformdev->name);
+ "platform:%s:%02d", dev->platformdev->name, dev->primary->index);
if (len > master->unique_len) {
DRM_ERROR("Unique buffer overflowed\n");
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index f5bd9e590c8..6f7e41b5816 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -263,6 +263,19 @@ int drm_pci_init(struct drm_driver *driver)
return 0;
}
+void drm_pci_exit(struct drm_driver *driver)
+{
+ struct drm_device *dev, *tmp;
+
+ if (driver->driver_features & DRIVER_MODESET) {
+ pci_unregister_driver(&driver->pci_driver);
+ } else {
+ list_for_each_entry_safe(dev, tmp, &driver->device_list,
+ driver_item)
+ drm_put_dev(dev);
+ }
+}
+
#else
int drm_pci_init(struct drm_driver *driver)
@@ -270,5 +283,9 @@ int drm_pci_init(struct drm_driver *driver)
return -1;
}
+void drm_pci_exit(struct drm_driver *driver)
+{
+}
+
#endif
/*@}*/
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 92d1d0fb7b7..8f68a5431db 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -123,5 +123,10 @@ EXPORT_SYMBOL(drm_get_platform_dev);
int drm_platform_init(struct drm_driver *driver)
{
- return drm_get_platform_dev(driver->platform_device, driver);
+ return platform_driver_register(&driver->platform_driver);
+}
+
+void drm_platform_exit(struct drm_driver *driver)
+{
+ platform_driver_unregister(&driver->platform_driver);
}
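With drm_platform_init()/drm_platform_exit() now registering and unregistering driver->platform_driver, a platform-backed DRM driver wires up roughly as follows. This is a sketch only, not code from this series: the omapgpu_* names are invented, and it assumes the DRIVER_USE_PLATFORM_DEVICE feature flag and the embedded .platform_driver member used in the hunks above.

	static struct drm_driver omapgpu_drm_driver;

	static int omapgpu_pdev_probe(struct platform_device *pdev)
	{
		/* binds the platform device to the DRM core */
		return drm_get_platform_dev(pdev, &omapgpu_drm_driver);
	}

	static struct drm_driver omapgpu_drm_driver = {
		.driver_features = DRIVER_USE_PLATFORM_DEVICE | DRIVER_MODESET,
		.platform_driver = {
			.driver	= { .name = "omapgpu" },
			.probe	= omapgpu_pdev_probe,
		},
		/* .fops, .load, .unload, etc. elided */
	};

	static int __init omapgpu_init(void)
	{
		return drm_init(&omapgpu_drm_driver);	/* -> drm_platform_init() */
	}

	static void __exit omapgpu_exit(void)
	{
		drm_exit(&omapgpu_drm_driver);		/* -> drm_platform_exit() */
	}

	module_init(omapgpu_init);
	module_exit(omapgpu_exit);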
diff --git a/drivers/gpu/drm/omap/Makefile b/drivers/gpu/drm/omap/Makefile
new file mode 100644
index 00000000000..0a2513f563f
--- /dev/null
+++ b/drivers/gpu/drm/omap/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+omapgpu-y := omap_gpu.o omap_crtc.o omap_encoder.o omap_connector.o omap_fb.o omap_fbdev.o
+
+obj-$(CONFIG_DRM_OMAP) += omapgpu.o
diff --git a/drivers/gpu/drm/omap/omap_connector.c b/drivers/gpu/drm/omap/omap_connector.c
new file mode 100644
index 00000000000..aefafd79a1f
--- /dev/null
+++ b/drivers/gpu/drm/omap/omap_connector.c
@@ -0,0 +1,504 @@
+/*
+ * linux/drivers/gpu/drm/omap/omap_connector.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/omap_gpu.h>
+#include "omap_gpu_priv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * connector funcs
+ */
+
+#define to_omap_connector(x) container_of(x, struct omap_connector, base)
+
+struct omap_connector {
+ struct drm_connector base;
+ struct omap_dss_device *dssdev;
+ struct drm_display_mode *native_mode;
+};
+
+static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+ struct omap_video_timings *timings)
+{
+ mode->clock = timings->pixel_clock;
+
+ mode->hdisplay = timings->x_res;
+ mode->hsync_start = mode->hdisplay + timings->hfp;
+ mode->hsync_end = mode->hsync_start + timings->hsw;
+ mode->htotal = mode->hsync_end + timings->hbp;
+
+ mode->vdisplay = timings->y_res;
+ mode->vsync_start = mode->vdisplay + timings->vfp;
+ mode->vsync_end = mode->vsync_start + timings->vsw;
+ mode->vtotal = mode->vsync_end + timings->vbp;
+
+ /* note: interlace, +/- hsync/vsync polarity, etc. should be
+ * set in the mode flags, but are not exposed in the
+ * omap_video_timings struct. The hdmi driver tracks those
+ * separately, so to set the mode all we need is a way to
+ * recover these timing values; the omap_dss_driver does
+ * the rest.
+ */
+}
+
+static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+ struct drm_display_mode *mode)
+{
+ timings->pixel_clock = mode->clock;
+
+ timings->x_res = mode->hdisplay;
+ timings->hfp = mode->hsync_start - mode->hdisplay;
+ timings->hsw = mode->hsync_end - mode->hsync_start;
+ timings->hbp = mode->htotal - mode->hsync_end;
+
+ timings->y_res = mode->vdisplay;
+ timings->vfp = mode->vsync_start - mode->vdisplay;
+ timings->vsw = mode->vsync_end - mode->vsync_start;
+ timings->vbp = mode->vtotal - mode->vsync_end;
+}
+
+void omap_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+
+ /* TODO: add API in DSS to suspend/resume individual displays.. */
+
+ DBG("%s: %d", dssdev->name, mode);
+}
+
+enum drm_connector_status omap_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ enum drm_connector_status ret;
+
+ if (dssdrv->is_detected(dssdev)) {
+ ret = connector_status_connected;
+ } else {
+ ret = connector_status_disconnected;
+ }
+
+ DBG("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);
+
+ return ret;
+}
+
+static void omap_connector_destroy(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+
+ DBG("%s", omap_connector->dssdev->name);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(omap_connector);
+}
+
+static struct drm_display_mode * omap_connector_native_mode(
+ struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *largest = NULL;
+ int high_w = 0, high_h = 0, high_v = 0;
+
+ list_for_each_entry(mode, &omap_connector->base.probed_modes, head) {
+ mode->vrefresh = drm_mode_vrefresh(mode);
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ continue;
+
+ /* Use preferred mode if there is one */
+ if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+ DBG("native mode from preferred: %dx%d@%d",
+ mode->hdisplay, mode->vdisplay, mode->vrefresh);
+ return drm_mode_duplicate(dev, mode);
+ }
+
+ /* Otherwise, take the resolution with the largest width, then
+ * height, then vertical refresh
+ */
+ if (mode->hdisplay < high_w)
+ continue;
+
+ if (mode->hdisplay == high_w && mode->vdisplay < high_h)
+ continue;
+
+ if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
+ mode->vrefresh < high_v)
+ continue;
+
+ high_w = mode->hdisplay;
+ high_h = mode->vdisplay;
+ high_v = mode->vrefresh;
+ largest = mode;
+ }
+
+ DBG("native mode from largest: %dx%d@%d", high_w, high_h, high_v);
+ return largest ? drm_mode_duplicate(dev, largest) : NULL;
+}
+
+struct moderec {
+ int hdisplay;
+ int vdisplay;
+};
+
+static struct moderec scaler_modes[] = {
+ { 1920, 1200 },
+ { 1920, 1080 },
+ { 1680, 1050 },
+ { 1600, 1200 },
+ { 1400, 1050 },
+ { 1400, 900 },
+ { 1280, 1024 },
+ { 1280, 960 },
+ { 1280, 720 },
+ { 1152, 768 },
+ { 1024, 768 },
+ { 800, 600 },
+ { 720, 480 },
+ { 640, 480 },
+ {}
+};
+
+static int omap_connector_scaler_modes_add(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct drm_display_mode *native = omap_connector->native_mode, *m;
+ struct drm_device *dev = connector->dev;
+ struct moderec *mode = &scaler_modes[0];
+ int modes = 0;
+
+ if (!native)
+ return 0;
+
+ while (mode->hdisplay) {
+ if (mode->hdisplay <= native->hdisplay &&
+ mode->vdisplay <= native->vdisplay) {
+ m = drm_cvt_mode(dev, mode->hdisplay, mode->vdisplay,
+ 60, true, false, false);
+ if (!m)
+ continue;
+
+ m->type |= DRM_MODE_TYPE_DRIVER;
+
+ DBG("adding scaler mode: %dx%d@%d", mode->hdisplay,
+ mode->vdisplay, drm_mode_vrefresh(m));
+ drm_mode_probed_add(connector, m);
+ modes++;
+ }
+ mode++;
+ }
+
+ return modes;
+}
+
+#define MAX_EDID 256
+
+static int omap_connector_get_modes(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct drm_device *dev = connector->dev;
+ int n = 0;
+
+ DBG("%s", omap_connector->dssdev->name);
+
+ if (omap_connector->native_mode) {
+ drm_mode_destroy(dev, omap_connector->native_mode);
+ omap_connector->native_mode = NULL;
+ }
+
+ /* If the display exposes EDID, parse it in the normal way to build
+ * a table of supported modes; otherwise (i.e. fixed-resolution LCD
+ * panels) just return a single mode corresponding to the currently
+ * configured timings:
+ */
+ if (dssdrv->get_edid) {
+ void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
+
+ if ((dssdrv->get_edid(dssdev, edid, MAX_EDID) == 0) &&
+ drm_edid_is_valid(edid)) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ omap_connector->native_mode =
+ omap_connector_native_mode(connector);
+ n += omap_connector_scaler_modes_add(connector);
+ kfree(connector->display_info.raw_edid);
+ connector->display_info.raw_edid = edid;
+ } else {
+ drm_mode_connector_update_edid_property(connector, NULL);
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+ } else {
+ struct drm_display_mode *mode = drm_mode_create(dev);
+ struct omap_video_timings timings;
+
+ dssdrv->get_timings(dssdev, &timings);
+
+ copy_timings_omap_to_drm(mode, &timings);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ n = 1;
+ }
+
+ return n;
+}
+
+static int omap_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omap_video_timings timings = {0};
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *new_mode;
+ int ret = MODE_BAD;
+
+ copy_timings_drm_to_omap(&timings, mode);
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ if (!dssdrv->check_timings(dssdev, &timings)) {
+ /* check if vrefresh is still valid */
+ new_mode = drm_mode_duplicate(dev, mode);
+ new_mode->clock = timings.pixel_clock;
+ new_mode->vrefresh = 0;
+ if (mode->vrefresh == drm_mode_vrefresh(new_mode))
+ ret = MODE_OK;
+ drm_mode_destroy(dev, new_mode);
+ }
+
+ DBG("connector: mode %s: "
+ "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ (ret == MODE_OK) ? "valid" : "invalid",
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+
+ return ret;
+}
+
+struct drm_encoder * omap_connector_attached_encoder(
+ struct drm_connector *connector)
+{
+ int i;
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ struct drm_mode_object *obj;
+
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+
+ if (obj) {
+ struct drm_encoder *encoder = obj_to_encoder(obj);
+ struct omap_overlay_manager *mgr =
+ omap_encoder_get_manager(encoder);
+ DBG("%s: found %s", omap_connector->dssdev->name,
+ mgr->name);
+ return encoder;
+ }
+ }
+
+ DBG("%s: no encoder", omap_connector->dssdev->name);
+
+ return NULL;
+}
+
+static const struct drm_connector_funcs omap_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = omap_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = omap_connector_destroy,
+};
+
+static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
+ .get_modes = omap_connector_get_modes,
+ .mode_valid = omap_connector_mode_valid,
+ .best_encoder = omap_connector_attached_encoder,
+};
+
+/* called from encoder when mode is set, to propagate settings to the dssdev */
+void omap_connector_mode_set(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omap_video_timings timings;
+
+ copy_timings_drm_to_omap(&timings, mode);
+
+ DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ omap_connector->dssdev->name,
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+
+ if (dssdrv->check_timings(dssdev, &timings)) {
+ dev_err(dev->dev, "could not set timings\n");
+ return;
+ }
+
+ dssdrv->set_timings(dssdev, &timings);
+}
+
+enum omap_dss_update_mode omap_connector_get_update_mode(
+ struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+
+ DBG("%s", omap_connector->dssdev->name);
+
+ if (dssdrv->get_update_mode) {
+ return dssdrv->get_update_mode(dssdev);
+ }
+
+ return -1;
+}
+EXPORT_SYMBOL(omap_connector_get_update_mode);
+
+int omap_connector_set_update_mode(struct drm_connector *connector,
+ enum omap_dss_update_mode mode)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+
+ DBG("%s: %d", omap_connector->dssdev->name, mode);
+
+ if (dssdrv->set_update_mode) {
+ return dssdrv->set_update_mode(dssdev, mode);
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(omap_connector_set_update_mode);
+
+int omap_connector_sync(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+
+ DBG("%s", omap_connector->dssdev->name);
+
+ if (dssdrv->sync) {
+ return dssdrv->sync(dssdev);
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(omap_connector_sync);
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+void omap_connector_flush(struct drm_connector *connector,
+ int x, int y, int w, int h)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+
+ /* TODO: enable when supported in dss */
+ VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
+}
+
+/* initialize connector */
+struct drm_connector * omap_connector_init(struct drm_device *dev,
+ int connector_type, struct omap_dss_device *dssdev)
+{
+ struct drm_connector *connector = NULL;
+ struct omap_connector *omap_connector;
+
+ DBG("%s", dssdev->name);
+
+ omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL);
+ if (!omap_connector) {
+ dev_err(dev->dev, "could not allocate connector\n");
+ goto fail;
+ }
+
+ omap_connector->dssdev = dssdev;
+ connector = &omap_connector->base;
+
+ drm_connector_init(dev, connector, &omap_connector_funcs,
+ connector_type);
+ drm_connector_helper_add(connector, &omap_connector_helper_funcs);
+
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_HPD) {
+ connector->polled = 0;
+ } else {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
+
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
+ drm_sysfs_connector_add(connector);
+
+ /* store resume info for suspended displays */
+ switch (dssdev->state) {
+ case OMAP_DSS_DISPLAY_SUSPENDED:
+ dssdev->activate_after_resume = true;
+ break;
+ case OMAP_DSS_DISPLAY_DISABLED:
+ if (dssdev->driver) {
+ int ret = dssdev->driver->enable(dssdev);
+ if (ret) {
+ DBG("%s: failed to enable: %d",
+ dssdev->name, ret);
+ dssdev->driver->disable(dssdev);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return connector;
+
+fail:
+ if (connector) {
+ drm_connector_cleanup(connector);
+ kfree(omap_connector);
+ }
+
+ return NULL;
+}
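A worked pass through copy_timings_omap_to_drm() above, with illustrative CEA 1280x720@60 numbers (not values from this patch):

	/*
	 * x_res = 1280, hfp = 110, hsw = 40, hbp = 220 gives
	 *	hdisplay    = 1280
	 *	hsync_start = 1280 + 110 = 1390
	 *	hsync_end   = 1390 +  40 = 1430
	 *	htotal      = 1430 + 220 = 1650
	 * copy_timings_drm_to_omap() is the exact inverse
	 * (hfp = hsync_start - hdisplay, and so on).
	 */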
diff --git a/drivers/gpu/drm/omap/omap_crtc.c b/drivers/gpu/drm/omap/omap_crtc.c
new file mode 100644
index 00000000000..1726fd76f14
--- /dev/null
+++ b/drivers/gpu/drm/omap/omap_crtc.c
@@ -0,0 +1,277 @@
+/*
+ * linux/drivers/gpu/drm/omap/omap_crtc.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/omap_gpu.h>
+#include "omap_gpu_priv.h"
+
+#include "drm_mode.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
+
+struct omap_crtc {
+ struct drm_crtc base;
+ struct omap_overlay *ovl;
+ struct omap_overlay_info info;
+};
+
+static int commit(struct drm_crtc *crtc);
+
+/* update parameters that are dependent on the framebuffer dimensions and
+ * position within the fb that this crtc scans out from. This is called
+ * when framebuffer dimensions or x,y base may have changed, either due
+ * to our mode, or a change in another crtc that is scanning out of the
+ * same fb.
+ */
+static void omap_crtc_update_scanout(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ unsigned long paddr;
+ void __iomem *vaddr;
+ int screen_width;
+
+ omap_framebuffer_get_buffer(crtc->fb, crtc->x, crtc->y,
+ &vaddr, &paddr, &screen_width);
+
+ DBG("%s: %d,%d: %p %08x (%d)", omap_crtc->ovl->name,
+ crtc->x, crtc->y, vaddr, paddr, screen_width);
+
+ omap_crtc->info.paddr = paddr;
+ omap_crtc->info.vaddr = vaddr;
+ omap_crtc->info.screen_width = screen_width;
+}
+
+static void omap_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->ovl->name);
+ /* XXX ignore? */
+}
+
+static void omap_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->ovl->name);
+ drm_crtc_cleanup(crtc);
+ kfree(omap_crtc);
+}
+
+static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%s: %d", omap_crtc->ovl->name, mode);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ omap_crtc_update_scanout(crtc);
+ omap_crtc->info.enabled = true;
+ } else {
+ omap_crtc->info.enabled = false;
+ }
+
+ commit(crtc);
+}
+
+static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ /* XXX I guess we support anything? */
+ DBG("%s", omap_crtc->ovl->name);
+ return true;
+}
+
+static int omap_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%s: %d,%d: %dx%d",omap_crtc->ovl->name, x, y,
+ mode->hdisplay, mode->vdisplay);
+
+ /* just use adjusted mode */
+ mode = adjusted_mode;
+
+ omap_crtc->info.width = mode->hdisplay;
+ omap_crtc->info.height = mode->vdisplay;
+ omap_crtc->info.out_width = mode->hdisplay;
+ omap_crtc->info.out_height = mode->vdisplay;
+ omap_crtc->info.color_mode = OMAP_DSS_COLOR_ARGB32;
+ omap_crtc->info.rotation_type = OMAP_DSS_ROT_DMA;
+ omap_crtc->info.rotation = OMAP_DSS_ROT_0;
+#if 0 /* enable when supported in dss */
+ omap_crtc->info.zorder = 3; /* GUI in the front, video behind */
+#endif
+ omap_crtc->info.global_alpha = 0xff;
+ omap_crtc->info.mirror = 0;
+ omap_crtc->info.pos_x = 0;
+ omap_crtc->info.pos_y = 0;
+#if 0 /* enable when supported in dss */
+ omap_crtc->info.min_x_decim = 1;
+ omap_crtc->info.max_x_decim = 1;
+ omap_crtc->info.min_y_decim = 1;
+ omap_crtc->info.max_y_decim = 1;
+#endif
+
+ omap_crtc_update_scanout(crtc);
+
+ return 0;
+}
+
+static void omap_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct omap_overlay *ovl = omap_crtc->ovl;
+
+ DBG("%s", omap_crtc->ovl->name);
+
+ ovl->get_overlay_info(ovl, &omap_crtc->info);
+
+ omap_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static int commit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct omap_overlay *ovl = omap_crtc->ovl;
+ struct omap_overlay_info *info = &omap_crtc->info;
+ int ret;
+
+ DBG("%s", omap_crtc->ovl->name);
+ DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width,
+ info->out_height, info->screen_width);
+ DBG("%d,%d %p %08x", info->pos_x, info->pos_y, info->vaddr,
+ info->paddr);
+
+ /* NOTE: do we want to do this at all here, or just wait
+ * for dpms(ON) since other CRTC's may not have their mode
+ * set yet, so fb dimensions may still change..
+ */
+ ret = ovl->set_overlay_info(ovl, info);
+ if (ret) {
+ dev_err(dev->dev, "could not set overlay info\n");
+ return ret;
+ }
+
+ /* our encoder doesn't necessarily get a commit() after this, in
+ * particular in the dpms() and mode_set_base() cases, so force the
+ * manager to update:
+ *
+ * could this be in the encoder somehow?
+ */
+ if (ovl->manager) {
+ ret = ovl->manager->apply(ovl->manager);
+ if (ret) {
+ dev_err(dev->dev, "could not apply\n");
+ return ret;
+ }
+ }
+
+ if (info->enabled) {
+ omap_framebuffer_flush(crtc->fb, crtc->x, crtc->y,
+ crtc->fb->width, crtc->fb->height);
+ }
+
+ return 0;
+}
+
+static void omap_crtc_commit(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->ovl->name);
+ omap_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%s %d,%d: fb=%p", omap_crtc->ovl->name, x, y, old_fb);
+
+ omap_crtc_update_scanout(crtc);
+
+ return commit(crtc);
+}
+
+void omap_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->ovl->name);
+}
+
+static const struct drm_crtc_funcs omap_crtc_funcs = {
+ .gamma_set = omap_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = omap_crtc_destroy,
+ /* TODO: .page_flip = omap_page_flip, */
+};
+
+static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
+ .dpms = omap_crtc_dpms,
+ .mode_fixup = omap_crtc_mode_fixup,
+ .mode_set = omap_crtc_mode_set,
+ .prepare = omap_crtc_prepare,
+ .commit = omap_crtc_commit,
+ .mode_set_base = omap_crtc_mode_set_base,
+ .load_lut = omap_crtc_load_lut,
+};
+
+struct omap_overlay * omap_crtc_get_overlay(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return omap_crtc->ovl;
+}
+
+/* initialize crtc */
+struct drm_crtc * omap_crtc_init(struct drm_device *dev,
+ struct omap_overlay *ovl)
+{
+ struct drm_crtc *crtc = NULL;
+ struct omap_crtc *omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
+
+ DBG("%s", ovl->name);
+
+ if (!omap_crtc) {
+ dev_err(dev->dev, "could not allocate CRTC\n");
+ goto fail;
+ }
+
+ omap_crtc->ovl = ovl;
+ crtc = &omap_crtc->base;
+ drm_crtc_init(dev, crtc, &omap_crtc_funcs);
+ drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
+
+ return crtc;
+
+fail:
+ if (crtc) {
+ drm_crtc_cleanup(crtc);
+ kfree(omap_crtc);
+ }
+ return NULL;
+}
diff --git a/drivers/gpu/drm/omap/omap_encoder.c b/drivers/gpu/drm/omap/omap_encoder.c
new file mode 100644
index 00000000000..e60fe81073b
--- /dev/null
+++ b/drivers/gpu/drm/omap/omap_encoder.c
@@ -0,0 +1,198 @@
+/*
+ * linux/drivers/gpu/drm/omap/omap_encoder.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/omap_gpu.h>
+#include "omap_gpu_priv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * encoder funcs
+ */
+
+#define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
+
+struct omap_encoder {
+ struct drm_encoder base;
+ struct omap_overlay_manager *mgr;
+};
+
+static void omap_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ DBG("%s", omap_encoder->mgr->name);
+ drm_encoder_cleanup(encoder);
+ kfree(omap_encoder);
+}
+
+static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct omap_gpu_private *priv = dev->dev_private;
+ int i;
+
+ DBG("%s: %d", omap_encoder->mgr->name, mode);
+
+ /* managers don't need to do anything for DPMS.. but we do
+ * need to propagate to the connector, who is actually going
+ * to enable/disable as needed:
+ */
+ for (i = 0; i < priv->num_connectors; i++) {
+ struct drm_connector *connector = priv->connectors[i];
+ if (connector->encoder == encoder) {
+ omap_connector_dpms(connector, mode);
+ }
+ }
+}
+
+static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ DBG("%s", omap_encoder->mgr->name);
+ return true;
+}
+
+static void omap_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct omap_gpu_private *priv = dev->dev_private;
+ int i;
+
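+ /* use the adjusted mode handed back from the mode_fixup step */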
+ mode = adjusted_mode;
+
+ DBG("%s: set mode: %dx%d", omap_encoder->mgr->name,
+ mode->hdisplay, mode->vdisplay);
+
+ for (i = 0; i < priv->num_connectors; i++) {
+ struct drm_connector *connector = priv->connectors[i];
+ if (connector->encoder == encoder) {
+ omap_connector_mode_set(connector, mode);
+ }
+ }
+}
+
+static void omap_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ DBG("%s", omap_encoder->mgr->name);
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_encoder_commit(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ DBG("%s", omap_encoder->mgr->name);
+ omap_encoder->mgr->apply(omap_encoder->mgr);
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_funcs omap_encoder_funcs = {
+ .destroy = omap_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
+ .dpms = omap_encoder_dpms,
+ .mode_fixup = omap_encoder_mode_fixup,
+ .mode_set = omap_encoder_mode_set,
+ .prepare = omap_encoder_prepare,
+ .commit = omap_encoder_commit,
+};
+
+struct omap_overlay_manager * omap_encoder_get_manager(
+ struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ return omap_encoder->mgr;
+}
+
+/* maybe this could go away and we just use drm_vblank_wait()? */
+int omap_encoder_wait_for_vsync(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ DBG("%s", omap_encoder->mgr->name);
+ return omap_encoder->mgr->wait_for_vsync(omap_encoder->mgr);
+}
+EXPORT_SYMBOL(omap_encoder_wait_for_vsync);
+
+/* initialize encoder */
+struct drm_encoder * omap_encoder_init(struct drm_device *dev,
+ struct omap_overlay_manager *mgr)
+{
+ struct drm_encoder *encoder = NULL;
+ struct omap_encoder *omap_encoder;
+ struct omap_overlay_manager_info info;
+ int ret;
+
+ DBG("%s", mgr->name);
+
+ omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
+ if (!omap_encoder) {
+ dev_err(dev->dev, "could not allocate encoder\n");
+ goto fail;
+ }
+
+ omap_encoder->mgr = mgr;
+ encoder = &omap_encoder->base;
+
+ mgr->get_manager_info(mgr, &info);
+
+ /* TODO: fix hard-coded setup.. */
+ info.default_color = 0x00000000;
+ info.trans_key = 0x00000000;
+ info.trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ info.trans_enabled = false;
+ info.alpha_enabled = false;
+
+ ret = mgr->set_manager_info(mgr, &info);
+ if (ret) {
+ dev_err(dev->dev, "could not set manager info\n");
+ goto fail;
+ }
+
+ ret = mgr->apply(mgr);
+ if (ret) {
+ dev_err(dev->dev, "could not apply\n");
+ goto fail;
+ }
+
+ drm_encoder_init(dev, encoder, &omap_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
+
+ return encoder;
+
+fail:
+ if (encoder) {
+ drm_encoder_cleanup(encoder);
+ kfree(omap_encoder);
+ }
+
+ return NULL;
+}
diff --git a/drivers/gpu/drm/omap/omap_fb.c b/drivers/gpu/drm/omap/omap_fb.c
new file mode 100644
index 00000000000..b3ac7fb21ac
--- /dev/null
+++ b/drivers/gpu/drm/omap/omap_fb.c
@@ -0,0 +1,368 @@
+/*
+ * linux/drivers/gpu/drm/omap/omap_fb.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <plat/vram.h>
+
+#include <linux/omap_gpu.h>
+#include "omap_gpu_priv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+
+static char *def_vram;
+module_param_named(vram, def_vram, charp, 0);
+
+/*
+ * framebuffer funcs
+ */
+
+#define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base)
+
+struct omap_framebuffer {
+ struct drm_framebuffer base;
+
+ /* framebuffer size/phys-addr/virt-addr */
+ int size;
+ unsigned long paddr;
+ void __iomem *vaddr;
+};
+
+
+/* copied from omapfb-main.c to preserve vram param syntax */
+static int parse_vram_param(const char *param, int max_entries,
+ unsigned long *sizes, unsigned long *paddrs)
+{
+ int fbnum;
+ unsigned long size;
+ unsigned long paddr = 0;
+ char *p, *start;
+
+ DBG("vram: %s", param);
+
+ start = (char *)param;
+
+ while (1) {
+ p = start;
+
+ fbnum = simple_strtoul(p, &p, 10);
+
+ if ((p == param) || (*p != ':') || (fbnum >= max_entries))
+ return -EINVAL;
+
+ size = memparse(p + 1, &p);
+
+ if (!size)
+ return -EINVAL;
+
+ paddr = 0;
+
+ if (*p == '@') {
+ paddr = simple_strtoul(p + 1, &p, 16);
+
+ if (!paddr)
+ return -EINVAL;
+ }
+
+ paddrs[fbnum] = paddr;
+ sizes[fbnum] = size;
+
+ if (*p == 0)
+ break;
+
+ if (*p != ',')
+ return -EINVAL;
+
+ ++p;
+
+ start = p;
+ }
+
+ return 0;
+}
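+
+/* The accepted syntax matches omapfb's vram= option, as parsed above:
+ *
+ *	vram=<fbnum>:<size>[@<paddr>][,<fbnum>:<size>[@<paddr>]]...
+ *
+ * e.g. omap_gpu.vram=0:4M or omap_gpu.vram=0:4M@0x9f400000 (hypothetical
+ * values) would reserve 4MB for fb 0, optionally at a fixed physical
+ * address.
+ */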
+
+#define MAX_FBS 10
+
+static int allocate_vram(struct drm_framebuffer *fb, int idx, int size)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ struct drm_device *dev = fb->dev;
+ unsigned long sizes[MAX_FBS] = {0};
+ unsigned long paddrs[MAX_FBS] = {0};
+ unsigned long paddr = 0;
+ int ret = -EINVAL;
+
+ if (idx >= MAX_FBS) {
+ dev_err(dev->dev, "invalid fb number: %d\n", idx);
+ goto fail;
+ }
+
+ if (def_vram) {
+ if (parse_vram_param(def_vram, MAX_FBS, sizes, paddrs)) {
+ dev_err(dev->dev, "failed to parse vram parameter\n");
+ memset(&sizes, 0, sizeof(sizes));
+ memset(&paddrs, 0, sizeof(paddrs));
+ } else {
+ size = sizes[idx];
+ paddr = paddrs[idx];
+ }
+ }
+
+ size = PAGE_ALIGN(size);
+
+ if (paddr) {
+ DBG("reserving %d bytes at %lx for fb %d", size, paddr, idx);
+ ret = omap_vram_reserve(paddr, size);
+ } else {
+ DBG("allocating %d bytes for fb %d", size, idx);
+ ret = omap_vram_alloc(OMAP_VRAM_MEMTYPE_SDRAM, size, &paddr);
+ }
+
+ if (ret) {
+ dev_err(dev->dev, "failed to allocate vram\n");
+ goto fail;
+ }
+
+ omap_fb->size = size;
+ omap_fb->paddr = paddr;
+ omap_fb->vaddr = ioremap_wc(paddr, size);
+
+ if (!omap_fb->vaddr) {
+ dev_err(dev->dev, "failed to ioremap framebuffer\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* memset(omap_fb->vaddr, 0, size); */
+
+ return 0;
+
+fail:
+ if (omap_fb->paddr) {
+ omap_vram_free(paddr, size);
+ }
+
+ omap_fb->size = 0;
+ omap_fb->vaddr = NULL;
+ omap_fb->paddr = 0;
+
+ return ret;
+}
+
+static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ DBG("framebuffer: get handle: %p", omap_fb);
+
+ /* TODO: this really should be some sort of GEM handle to the
+ * framebuffer object, in case it needs to be mapped or something.
+ * Right now this will co-exist badly with PVR, which implements
+ * the mmap() function.. need to think about how to handle this.
+ */
+
+ *handle = 42;
+
+ return 0;
+}
+
+static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ /* omap_vram_free() doesn't really do what you'd think.. (or at
+ * least not if you think it'd return vram to the pool), so
+ * disabling this for now until there is a better way to alloc
+ * and free coherent memory..
+ */
+ DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+}
+
+static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned flags, unsigned color,
+ struct drm_clip_rect *clips, unsigned num_clips)
+{
+ int i;
+
+ for (i = 0; i < num_clips; i++) {
+ omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
+ clips[i].x2 - clips[i].x1,
+ clips[i].y2 - clips[i].y1);
+ }
+
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
+ .create_handle = omap_framebuffer_create_handle,
+ .destroy = omap_framebuffer_destroy,
+ .dirty = omap_framebuffer_dirty,
+};
+
+int omap_framebuffer_get_buffer(struct drm_framebuffer *fb, int x, int y,
+ void **vaddr, unsigned long *paddr, int *screen_width)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ int bpp = 4; /* XXX fb->depth / 8 */
+ unsigned long offset;
+
+ offset = (x * bpp) + (y * fb->pitch);
+
+ *vaddr = omap_fb->vaddr + offset;
+ *paddr = omap_fb->paddr + offset;
+ *screen_width = fb->pitch / bpp;
+
+ return omap_fb->size;
+}
+
+/* iterate thru all the connectors, returning ones that are attached
+ * to the same fb..
+ */
+struct drm_connector * omap_framebuffer_get_next_connector(
+ struct drm_framebuffer *fb, struct drm_connector *from)
+{
+ struct drm_device *dev = fb->dev;
+ struct list_head *connector_list = &dev->mode_config.connector_list;
+ struct drm_connector *connector = from;
+
+ if (!from) {
+ return list_first_entry(connector_list, typeof(*from), head);
+ }
+
+ list_for_each_entry_from(connector, connector_list, head) {
+ if (connector != from) {
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+ if (crtc && crtc->fb == fb) {
+ return connector;
+ }
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(omap_framebuffer_get_next_connector);
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+void omap_framebuffer_flush(struct drm_framebuffer *fb,
+ int x, int y, int w, int h)
+{
+ struct drm_connector *connector = NULL;
+
+ VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
+
+ while ((connector =
+ omap_framebuffer_get_next_connector(fb, connector))) {
+ /* only consider connectors that are part of a chain */
+ if (connector->encoder && connector->encoder->crtc) {
+ /* TODO: maybe this should propagate through the crtc, which
+ * could do the coordinate translation..
+ */
+ struct drm_crtc *crtc = connector->encoder->crtc;
+ int cx = max(0, x - crtc->x);
+ int cy = max(0, y - crtc->y);
+ int cw = w + (x - crtc->x) - cx;
+ int ch = h + (y - crtc->y) - cy;
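+ /* e.g. (hypothetical numbers) with crtc->x == 100, a flush of
+ * x == 90, w == 40 clips to cx == 0, cw == 30, i.e. only the
+ * part overlapping this crtc gets flushed
+ */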
+
+ omap_connector_flush(connector, cx, cy, cw, ch);
+ }
+ }
+}
+EXPORT_SYMBOL(omap_framebuffer_flush);
+
+struct drm_framebuffer * omap_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd *mode_cmd)
+{
+ return omap_framebuffer_init(dev, mode_cmd);
+}
+
+struct drm_framebuffer * omap_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct omap_gpu_private *priv = dev->dev_private;
+ struct omap_framebuffer *omap_fb = NULL;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ /* in case someone tries to feed us a completely bogus stride: */
+ mode_cmd->pitch = max(mode_cmd->pitch,
+ mode_cmd->width * mode_cmd->bpp / 8);
+
+ /* pvr needs to have a stride that is a multiple of 8 pixels: */
+ mode_cmd->pitch = ALIGN(mode_cmd->pitch, 8 * (mode_cmd->bpp / 8));
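+ /* e.g. (hypothetical numbers) a 601 pixel wide ARGB32 surface gives a
+ * minimum pitch of 2404 bytes, which the ALIGN above rounds up to
+ * 2432 bytes (608 pixels)
+ */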
+
+ /* for now, we can only support a single fb.. so we need to resize
+ * the old one instead of creating a new one of different size. If
+ * we can eventually get rid of the vram pool and had a better way
+ * to allocate contiguous memory after boot time, this restriction
+ * should be lifted.
+ */
+ if (priv->fb) {
+ fb = priv->fb;
+ DBG("recycle: FB ID: %d (%p)", fb->base.id, fb);
+ goto recycle; /* ugg, we need CMA! */
+ }
+
+ DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d)", dev,
+ mode_cmd, mode_cmd->width, mode_cmd->height);
+
+ omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
+ if (!omap_fb) {
+ dev_err(dev->dev, "could not allocate fb\n");
+ goto fail;
+ }
+
+ fb = &omap_fb->base;
+ ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ goto fail;
+ }
+
+ DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+
+ ret = allocate_vram(fb, dev->primary->index,
+ mode_cmd->pitch * mode_cmd->height);
+ if (ret) {
+ dev_err(dev->dev, "failed to allocate framebuffer\n");
+ goto fail;
+ }
+
+recycle:
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+ priv->fb = fb;
+
+ if (priv->fbdev) {
+ /* if fbdev is already created, we need to update it to
+ * be attached to the new fb
+ */
+ omap_fbdev_update(priv->fbdev, fb);
+ }
+
+ return fb;
+
+fail:
+ if (omap_fb) {
+ kfree(omap_fb);
+ }
+ return NULL;
+}
+
diff --git a/drivers/gpu/drm/omap/omap_fbdev.c b/drivers/gpu/drm/omap/omap_fbdev.c
new file mode 100644
index 00000000000..84b94dbfea4
--- /dev/null
+++ b/drivers/gpu/drm/omap/omap_fbdev.c
@@ -0,0 +1,298 @@
+/*
+ * linux/drivers/gpu/drm/omap/omap_fbdev.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/omap_gpu.h>
+#include "omap_gpu_priv.h"
+
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+
+/*
+ * fbdev funcs, to implement legacy fbdev interface on top of drm driver
+ */
+
+#define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base)
+
+struct omap_fbdev {
+ struct drm_fb_helper base;
+ struct drm_framebuffer *fb;
+};
+
+static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t res;
+
+ res = fb_sys_write(fbi, buf, count, ppos);
+ omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres);
+
+ return res;
+}
+
+static void omap_fbdev_fillrect(struct fb_info *fbi,
+ const struct fb_fillrect *rect)
+{
+ sys_fillrect(fbi, rect);
+ omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height);
+}
+
+static void omap_fbdev_copyarea(struct fb_info *fbi,
+ const struct fb_copyarea *area)
+{
+ sys_copyarea(fbi, area);
+ omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height);
+}
+
+static void omap_fbdev_imageblit(struct fb_info *fbi,
+ const struct fb_image *image)
+{
+ sys_imageblit(fbi, image);
+ omap_fbdev_flush(fbi, image->dx, image->dy,
+ image->width, image->height);
+}
+
+static struct fb_ops omap_fb_ops = {
+ .owner = THIS_MODULE,
+
+ /* Note: to properly handle manual update displays, we wrap the
+ * basic fbdev ops which write to the framebuffer
+ */
+ .fb_read = fb_sys_read,
+ .fb_write = omap_fbdev_write,
+ .fb_fillrect = omap_fbdev_fillrect,
+ .fb_copyarea = omap_fbdev_copyarea,
+ .fb_imageblit = omap_fbdev_imageblit,
+
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int omap_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+ struct drm_device *dev = helper->dev;
+ struct fb_info *fbi;
+ struct drm_mode_fb_cmd mode_cmd = {0};
+ struct device *device = &dev->platformdev->dev;
+ int ret;
+
+ /* only doing ARGB32 since this is what is needed to alpha-blend
+ * with video overlays:
+ */
+ sizes->surface_bpp = 32;
+ sizes->surface_depth = 32;
+
+ DBG("create fbdev: %dx%d@%d", sizes->surface_width,
+ sizes->surface_height, sizes->surface_bpp);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.bpp = sizes->surface_bpp;
+ mode_cmd.depth = sizes->surface_depth;
+
+ mutex_lock(&dev->struct_mutex);
+
+ fbi = framebuffer_alloc(0, device);
+ if (!fbi) {
+ dev_err(dev->dev, "failed to allocate fb info\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ DBG("fbi=%p, dev=%p", fbi, dev);
+
+ fbdev->fb = omap_framebuffer_init(dev, &mode_cmd);
+ if (!fbdev->fb) {
+ dev_err(dev->dev, "failed to allocate fb\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ helper->fb = fbdev->fb;
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_DEFAULT;
+ fbi->fbops = &omap_fb_ops;
+
+ strcpy(fbi->fix.id, MODULE_NAME);
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ omap_fbdev_update(helper, fbdev->fb);
+
+ DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+ DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+
+fail:
+ mutex_unlock(&dev->struct_mutex);
+ /* TODO: cleanup? */
+ return ret;
+}
+
+static void omap_crtc_fb_gamma_set(struct drm_crtc *crtc,
+ u16 red, u16 green, u16 blue, int regno)
+{
+ DBG("fbdev: set gamma");
+ /* XXX ignore? */
+}
+
+static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue, int regno)
+{
+ DBG("fbdev: get gamma");
+ /* XXX ignore? */
+}
+
+static int omap_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = omap_fbdev_create(helper, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
+ .gamma_set = omap_crtc_fb_gamma_set,
+ .gamma_get = omap_crtc_fb_gamma_get,
+ .fb_probe = omap_fbdev_probe,
+};
+
+static struct drm_fb_helper * get_fb(struct fb_info *fbi)
+{
+ if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
+ /* these are not the fb's you're looking for */
+ return NULL;
+ }
+ return fbi->par;
+}
+
+void omap_fbdev_update(struct drm_fb_helper *helper,
+ struct drm_framebuffer *fb)
+{
+ struct fb_info *fbi = helper->fbdev;
+ struct drm_device *dev = helper->dev;
+ struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+ unsigned long paddr;
+ void __iomem *vaddr;
+ int size, screen_width;
+
+ DBG("update fbdev: %dx%d, fbi=%p", fb->width, fb->height, fbi);
+
+ fbdev->fb = fb;
+
+ drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+ size = omap_framebuffer_get_buffer(fbdev->fb, 0, 0,
+ &vaddr, &paddr, &screen_width);
+
+ dev->mode_config.fb_base = paddr;
+
+ fbi->screen_base = vaddr;
+ fbi->screen_size = size;
+ fbi->fix.smem_start = paddr;
+ fbi->fix.smem_len = size;
+}
+
+struct drm_connector * omap_fbdev_get_next_connector(struct fb_info *fbi,
+ struct drm_connector *from)
+{
+ struct drm_fb_helper *helper = get_fb(fbi);
+
+ if (!helper)
+ return NULL;
+
+ return omap_framebuffer_get_next_connector(helper->fb, from);
+}
+EXPORT_SYMBOL(omap_fbdev_get_next_connector);
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h)
+{
+ struct drm_fb_helper *helper = get_fb(fbi);
+
+ if (!helper)
+ return;
+
+ VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi);
+
+ omap_framebuffer_flush(helper->fb, x, y, w, h);
+}
+EXPORT_SYMBOL(omap_fbdev_flush);
+
+/* initialize fbdev helper */
+struct drm_fb_helper * omap_fbdev_init(struct drm_device *dev)
+{
+ struct omap_gpu_private *priv = dev->dev_private;
+ struct omap_fbdev *fbdev = NULL;
+ struct drm_fb_helper *helper;
+ int ret = 0;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ dev_err(dev->dev, "could not allocate fbdev\n");
+ goto fail;
+ }
+
+ helper = &fbdev->base;
+
+ helper->funcs = &omap_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, helper, priv->num_crtcs, 4);
+ if (ret) {
+ dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ goto fail;
+ }
+
+ drm_fb_helper_single_add_all_connectors(helper);
+ drm_fb_helper_initial_config(helper, 32);
+
+ priv->fbdev = helper;
+
+ return helper;
+
+fail:
+ if (fbdev) {
+ kfree(fbdev);
+ }
+ return NULL;
+}
diff --git a/drivers/gpu/drm/omap/omap_gpu.c b/drivers/gpu/drm/omap/omap_gpu.c
new file mode 100644
index 00000000000..e63e27af46b
--- /dev/null
+++ b/drivers/gpu/drm/omap/omap_gpu.c
@@ -0,0 +1,752 @@
+/*
+ * linux/drivers/gpu/drm/omap/omap_gpu.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/omap_gpu.h>
+#include "omap_gpu_priv.h"
+
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+
+#define DRIVER_NAME MODULE_NAME
+#define DRIVER_DESC "OMAP GPU"
+#define DRIVER_DATE "20110403"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+struct drm_device *drm_device;
+
+/* TODO: think about how to handle more than one plugin.. i.e. for some
+ * ops we might want to stop on the first plugin that doesn't return an
+ * error, etc..
+ */
+LIST_HEAD(plugin_list);
+
+/* keep track of whether we are already loaded.. we may need to call a
+ * plugin's load() if it registers after we are already loaded
+ */
+static bool loaded = false;
+
+/*
+ * mode config funcs
+ */
+
+/* Notes about mapping DSS and DRM entities:
+ * CRTC: overlay
+ * encoder: manager.. with some extension to allow one primary CRTC
+ * and zero or more video CRTCs to be mapped to one encoder?
+ * connector: dssdev.. manager can be attached/detached from different
+ * devices
+ */
+
+static void omap_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct omap_gpu_private *priv = dev->dev_private;
+ DBG("dev=%p", dev);
+ if (priv->fbdev) {
+ drm_fb_helper_hotplug_event(priv->fbdev);
+ }
+}
+
+static struct drm_mode_config_funcs omap_mode_config_funcs = {
+ .fb_create = omap_framebuffer_create,
+ .output_poll_changed = omap_fb_output_poll_changed,
+};
+
+static int get_connector_type(struct omap_dss_device *dssdev)
+{
+ switch (dssdev->type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+ if (!strcmp(dssdev->name, "dvi"))
+ return DRM_MODE_CONNECTOR_DVID;
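+ /* fall through: other DPI panels are reported as unknown */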
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+static int omap_gpu_notifier(struct notifier_block *nb,
+ unsigned long evt, void *arg)
+{
+ switch (evt) {
+ case OMAP_DSS_SIZE_CHANGE:
+ case OMAP_DSS_HOTPLUG_CONNECT:
+ case OMAP_DSS_HOTPLUG_DISCONNECT: {
+ struct drm_device *dev = drm_device;
+ DBG("hotplug event: evt=%d, dev=%p", evt, dev);
+ if (dev) {
+ drm_sysfs_hotplug_event(dev);
+ }
+ return NOTIFY_OK;
+ }
+ default: /* don't care about other events for now */
+ return NOTIFY_DONE;
+ }
+}
+
+static void dump_video_chains(void)
+{
+ int i;
+
+ DBG("dumping video chains: ");
+ for (i = 0; i < omap_dss_get_num_overlays(); i++) {
+ struct omap_overlay *ovl = omap_dss_get_overlay(i);
+ struct omap_overlay_manager *mgr = ovl->manager;
+ struct omap_dss_device *dssdev = mgr ? mgr->device : NULL;
+ if (dssdev) {
+ DBG("%d: %s -> %s -> %s", i, ovl->name, mgr->name,
+ dssdev->name);
+ } else if (mgr) {
+ DBG("%d: %s -> %s", i, ovl->name, mgr->name);
+ } else {
+ DBG("%d: %s", i, ovl->name);
+ }
+ }
+}
+
+static int omap_modeset_init(struct drm_device *dev)
+{
+ const struct omap_gpu_platform_data *pdata = dev->dev->platform_data;
+ struct omap_gpu_private *priv = dev->dev_private;
+ struct omap_dss_device *dssdev = NULL;
+ int i, j;
+ unsigned int connected_connectors = 0;
+
+ /* create encoders for each manager */
+ int create_encoder(int i) {
+ struct omap_overlay_manager *mgr =
+ omap_dss_get_overlay_manager(i);
+ struct drm_encoder *encoder = omap_encoder_init(dev, mgr);
+
+ if (!encoder) {
+ dev_err(dev->dev, "could not create encoder\n");
+ return -ENOMEM;
+ }
+
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ return 0;
+ }
+
+ /* create connectors for each display device */
+ int create_connector(struct omap_dss_device *dssdev) {
+ static struct notifier_block *notifier;
+ struct drm_connector *connector;
+
+ if (!dssdev->driver) {
+ dev_warn(dev->dev, "%s has no driver.. skipping it\n",
+ dssdev->name);
+ return 0;
+ }
+
+ if (!(dssdev->driver->get_timings ||
+ dssdev->driver->get_edid)) {
+ dev_warn(dev->dev, "%s driver does not support "
+ "get_timings or get_edid.. skipping it!\n",
+ dssdev->name);
+ return 0;
+ }
+
+ connector = omap_connector_init(dev,
+ get_connector_type(dssdev), dssdev);
+
+ if (!connector) {
+ dev_err(dev->dev, "could not create connector\n");
+ return -ENOMEM;
+ }
+
+ /* track what is already connected.. rather than looping thru
+ * all connectors twice later, first for connected then for
+ * remainder (which could be a race condition if connected
+ * status changes)
+ */
+ if (omap_connector_detect(connector, true) ==
+ connector_status_connected) {
+ connected_connectors |= (1 << priv->num_connectors);
+ }
+
+ priv->connectors[priv->num_connectors++] = connector;
+
+ notifier = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
+ notifier->notifier_call = omap_gpu_notifier;
+ omap_dss_add_notify(dssdev, notifier);
+
+ for (j = 0; j < priv->num_encoders; j++) {
+ struct omap_overlay_manager *mgr =
+ omap_encoder_get_manager(priv->encoders[j]);
+ if (mgr->device == dssdev) {
+ drm_mode_connector_attach_encoder(connector,
+ priv->encoders[j]);
+ }
+ }
+
+ return 0;
+ }
+
+ /* create up to max_overlays CRTCs mapping to overlays.. by default,
+ * connect the overlays to different managers/encoders, giving priority
+ * to encoders connected to connectors with a detected connection
+ */
+ int create_crtc(int i) {
+ struct omap_overlay *ovl = omap_dss_get_overlay(i);
+ struct omap_overlay_manager *mgr = NULL;
+ struct drm_crtc *crtc;
+
+ if (ovl->manager) {
+ DBG("disconnecting %s from %s", ovl->name,
+ ovl->manager->name);
+ ovl->unset_manager(ovl);
+ }
+
+ /* find the next best connector, ones with a detected
+ * connection first
+ */
+ while (j < priv->num_connectors && !mgr) {
+ if (connected_connectors & (1 << j)) {
+ struct drm_encoder * encoder =
+ omap_connector_attached_encoder(
+ priv->connectors[j]);
+ if (encoder) {
+ mgr = omap_encoder_get_manager(encoder);
+ }
+ }
+ j++;
+ }
+
+ /* if we couldn't find another connected connector, let's start
+ * looking at the unconnected connectors:
+ */
+ while (j < 2 * priv->num_connectors && !mgr) {
+ int idx = j - priv->num_connectors;
+ if (!(connected_connectors & (1 << idx))) {
+ struct drm_encoder * encoder =
+ omap_connector_attached_encoder(
+ priv->connectors[idx]);
+ if (encoder) {
+ mgr = omap_encoder_get_manager(encoder);
+ }
+ }
+ j++;
+ }
+
+ if (mgr) {
+ DBG("connecting %s to %s", ovl->name, mgr->name);
+ ovl->set_manager(ovl, mgr);
+ }
+
+ crtc = omap_crtc_init(dev, ovl);
+
+ if (!crtc) {
+ dev_err(dev->dev, "could not create CRTC\n");
+ return -ENOMEM;
+ }
+
+ priv->crtcs[priv->num_crtcs++] = crtc;
+
+ return 0;
+ }
+
+ drm_mode_config_init(dev);
+
+ if (pdata) {
+ /* if platform data is provided by the board file, use it to
+ * control which overlays, managers, and devices we own.
+ */
+ for (i = 0; i < pdata->mgr_cnt; i++) {
+ if (create_encoder(pdata->mgr_ids[i])) {
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < pdata->dev_cnt; i++) {
+ int m(struct omap_dss_device *dssdev, void *data) {
+ return ! strcmp(dssdev->name, data);
+ }
+ struct omap_dss_device *dssdev =
+ omap_dss_find_device(
+ (void *)pdata->dev_names[i], m);
+ if (!dssdev) {
+ dev_warn(dev->dev, "no such dssdev: %s\n",
+ pdata->dev_names[i]);
+ continue;
+ }
+ if (create_connector(dssdev)) {
+ goto fail;
+ }
+ }
+
+ j = 0;
+ for (i = 0; i < pdata->ovl_cnt; i++) {
+ if (create_crtc(pdata->ovl_ids[i])) {
+ goto fail;
+ }
+ }
+ } else {
+ /* otherwise just grab up to CONFIG_DRM_OMAP_NUM_CRTCS and try
+ * to make educated guesses about everything else
+ */
+ int max_overlays = min(omap_dss_get_num_overlays(),
+ CONFIG_DRM_OMAP_NUM_CRTCS);
+
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
+ if (create_encoder(i)) {
+ goto fail;
+ }
+ }
+
+ for_each_dss_dev(dssdev) {
+ if (create_connector(dssdev)) {
+ goto fail;
+ }
+ }
+
+ j = 0;
+ for (i = 0; i < max_overlays; i++) {
+ if (create_crtc(i)) {
+ goto fail;
+ }
+ }
+ }
+
+ /* for now keep the mapping of CRTCs and encoders static.. */
+ for (i = 0; i < priv->num_encoders; i++) {
+ struct drm_encoder *encoder = priv->encoders[i];
+ struct omap_overlay_manager *mgr =
+ omap_encoder_get_manager(encoder);
+
+ encoder->possible_crtcs = 0;
+
+ for (j = 0; j < priv->num_crtcs; j++) {
+ struct omap_overlay *ovl =
+ omap_crtc_get_overlay(priv->crtcs[j]);
+ if (ovl->manager == mgr) {
+ encoder->possible_crtcs |= (1 << j);
+ }
+ }
+
+ DBG("%s: possible_crtcs=%08x", mgr->name,
+ encoder->possible_crtcs);
+ }
+
+ dump_video_chains();
+
+ dev->mode_config.min_width = 640;
+ dev->mode_config.min_height = 480;
+
+ /* note: pvr can't currently handle dst surfaces larger than 2k by 2k */
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
+ dev->mode_config.funcs = &omap_mode_config_funcs;
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+fail:
+ /* TODO: cleanup what has been created so far */
+ return -EINVAL;
+}
+
+/*
+ * drm driver funcs
+ */
+
+/**
+ * load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
+static int dev_load(struct drm_device *dev, unsigned long flags)
+{
+ struct omap_gpu_private *priv;
+ struct omap_gpu_plugin *plugin;
+ int ret;
+
+ DBG("load: dev=%p", dev);
+
+ drm_device = dev;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "could not allocate priv\n");
+ return -1;
+ }
+
+ dev->dev_private = priv;
+
+ ret = omap_modeset_init(dev);
+ if (ret) {
+ dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
+ // hmm
+ //return ret;
+ }
+
+ priv->fbdev = omap_fbdev_init(dev);
+ if (!priv->fbdev) {
+ dev_err(dev->dev, "omap_fbdev_init failed\n");
+ ret = -ENOMEM;
+ // hmm
+ //return ret;
+ }
+
+ loaded = true;
+
+ list_for_each_entry(plugin, &plugin_list, list) {
+ ret = plugin->load(dev, flags);
+ }
+
+ return 0;
+}
+
+static int dev_unload(struct drm_device *dev)
+{
+ struct omap_gpu_plugin *plugin;
+ int ret;
+
+ DBG("unload: dev=%p", dev);
+
+ list_for_each_entry(plugin, &plugin_list, list) {
+ ret = plugin->unload(dev);
+ }
+
+ drm_kms_helper_poll_fini(dev);
+
+ loaded = false;
+
+ return 0;
+}
+
+static int dev_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct omap_gpu_plugin *plugin;
+ bool found_pvr = false;
+ int ret;
+
+ file->driver_priv = NULL;
+
+ DBG("open: dev=%p, file=%p", dev, file);
+
+ list_for_each_entry(plugin, &plugin_list, list) {
+ if (!strcmp(DRIVER_NAME "_pvr", plugin->name)) {
+ found_pvr = true;
+ break;
+ }
+ }
+
+ if (!found_pvr) {
+ DBG("open: PVR submodule not loaded.. let's try now");
+ request_module(DRIVER_NAME "_pvr");
+ }
+
+ list_for_each_entry(plugin, &plugin_list, list) {
+ ret = plugin->open(dev, file);
+ }
+
+ return 0;
+}
+
+static int dev_firstopen(struct drm_device *dev)
+{
+ DBG("firstopen: dev=%p", dev);
+ return 0;
+}
+
+/**
+ * lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited. In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ *
+ * Additionally, in the non-mode setting case, we'll tear down the AGP
+ * and DMA structures, since the kernel won't be using them, and clean
+ * up any GEM state.
+ */
+static void dev_lastclose(struct drm_device * dev)
+{
+ DBG("lastclose: dev=%p", dev);
+}
+
+static void dev_preclose(struct drm_device * dev, struct drm_file *file)
+{
+ DBG("preclose: dev=%p", dev);
+}
+
+static void dev_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct omap_gpu_plugin *plugin;
+ int ret;
+
+ DBG("postclose: dev=%p, file=%p", dev, file);
+
+ list_for_each_entry(plugin, &plugin_list, list) {
+ ret = plugin->release(dev, file);
+ }
+
+ return;
+}
+
+/**
+ * enable_vblank - enable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Enable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ *
+ * RETURNS
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+static int dev_enable_vblank(struct drm_device *dev, int crtc)
+{
+ DBG("enable_vblank: dev=%p, crtc=%d", dev, crtc);
+ return 0;
+}
+
+/**
+ * disable_vblank - disable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Disable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ */
+static void dev_disable_vblank(struct drm_device *dev, int crtc)
+{
+ DBG("disable_vblank: dev=%p, crtc=%d", dev, crtc);
+}
+
+/**
+ * Called by \c drm_device_is_agp. Typically used to determine if a
+ * card is really attached to AGP or not.
+ *
+ * \param dev DRM device handle
+ *
+ * \returns
+ * One of three values is returned depending on whether or not the
+ * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
+ * (return of 1), or may or may not be AGP (return of 2).
+ */
+static int dev_device_is_agp(struct drm_device *dev)
+{
+ return 0;
+}
+
+static irqreturn_t dev_irq_handler(DRM_IRQ_ARGS)
+{
+ return IRQ_HANDLED;
+}
+
+static void dev_irq_preinstall(struct drm_device *dev)
+{
+ DBG("irq_preinstall: dev=%p", dev);
+}
+
+static int dev_irq_postinstall(struct drm_device *dev)
+{
+ DBG("irq_postinstall: dev=%p", dev);
+ return 0;
+}
+
+static void dev_irq_uninstall(struct drm_device *dev)
+{
+ DBG("irq_uninstall: dev=%p", dev);
+}
+
+static int fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct omap_gpu_plugin *plugin;
+ int ret = 0;
+
+ list_for_each_entry(plugin, &plugin_list, list) {
+ ret = plugin->mmap(file, vma);
+ if (!ret) {
+ /* on first plugin that succeeds, bail out of iteration */
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static struct drm_driver omap_gpu_driver;
+
+static int pdev_suspend(struct platform_device *pDevice, pm_message_t state)
+{
+ DBG("pdev_suspend");
+ return 0;
+}
+
+static int pdev_resume(struct platform_device *device)
+{
+ DBG("pdev_resume");
+ return 0;
+}
+
+static void pdev_shutdown(struct platform_device *device)
+{
+ DBG("pdev_shutdown");
+}
+
+static int pdev_probe(struct platform_device *device)
+{
+ DBG("pdev_probe: %s", device->name);
+ return drm_get_platform_dev(device, &omap_gpu_driver);
+}
+
+static int pdev_remove(struct platform_device *device)
+{
+ DBG("pdev_remove");
+ drm_put_dev(drm_device);
+ return 0;
+}
+
+struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {{0}};
+
+static struct drm_driver omap_gpu_driver = {
+ .driver_features = DRIVER_HAVE_IRQ |
+ DRIVER_USE_PLATFORM_DEVICE | DRIVER_MODESET,
+ .load = dev_load,
+ .unload = dev_unload,
+ .open = dev_open,
+ .firstopen = dev_firstopen,
+ .lastclose = dev_lastclose,
+ .preclose = dev_preclose,
+ .postclose = dev_postclose,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = dev_enable_vblank,
+ .disable_vblank = dev_disable_vblank,
+ .device_is_agp = dev_device_is_agp,
+ .irq_preinstall = dev_irq_preinstall,
+ .irq_postinstall = dev_irq_postinstall,
+ .irq_uninstall = dev_irq_uninstall,
+ .irq_handler = dev_irq_handler,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+ .ioctls = ioctls,
+ .num_ioctls = 0,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+ .mmap = fop_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+ },
+ .platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = pdev_probe,
+ .remove = pdev_remove,
+ .suspend = pdev_suspend,
+ .resume = pdev_resume,
+ .shutdown = pdev_shutdown,
+ },
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+int omap_gpu_register_plugin(struct omap_gpu_plugin *plugin)
+{
+ struct drm_device *dev = drm_device;
+ int i;
+
+ DBG("register plugin: %p (%s)", plugin, plugin->name);
+
+ /* XXX: PVR code uses drm_file->driver_priv...
+ * need to come up with some sane way to handle this.
+ */
+
+ list_add_tail(&plugin->list, &plugin_list);
+
+ /* register the plugin's ioctl's */
+ for (i = 0; i < plugin->num_ioctls; i++) {
+ int nr = i + plugin->ioctl_start;
+
+ /* check for out of bounds ioctl nr or already registered ioctl */
+ if (nr >= ARRAY_SIZE(ioctls) || ioctls[nr].func) {
+ dev_err(dev->dev, "invalid ioctl: %d (nr=%d)\n", i, nr);
+ return -EINVAL;
+ }
+
+ DBG("register ioctl: %d %08x", nr, plugin->ioctls[i].cmd);
+
+ ioctls[nr] = plugin->ioctls[i];
+
+ if (nr >= omap_gpu_driver.num_ioctls) {
+ omap_gpu_driver.num_ioctls = nr + 1;
+ }
+ }
+
+ if (loaded) {
+ plugin->load(dev, 0);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_gpu_register_plugin);
+
+int omap_gpu_unregister_plugin(struct omap_gpu_plugin *plugin)
+{
+ list_del(&plugin->list);
+ return 0;
+}
+EXPORT_SYMBOL(omap_gpu_unregister_plugin);
+
+struct fb_info * omap_gpu_get_fbdev(struct drm_device *dev)
+{
+ struct omap_gpu_private *priv = dev->dev_private;
+ return priv->fbdev->fbdev;
+}
+EXPORT_SYMBOL(omap_gpu_get_fbdev);
+
+static int __init omap_gpu_init(void)
+{
+ DBG("init");
+ return drm_init(&omap_gpu_driver);
+}
+
+static void __exit omap_gpu_fini(void)
+{
+ DBG("fini");
+ drm_exit(&omap_gpu_driver);
+}
+
+/* need late_initcall() so we load after the DSS drivers are loaded */
+late_initcall(omap_gpu_init);
+module_exit(omap_gpu_fini);
+
+MODULE_AUTHOR("Rob Clark <rob@ti.com>");
+MODULE_DESCRIPTION("OMAP DRM Display Driver");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omap/omap_gpu_priv.h b/drivers/gpu/drm/omap/omap_gpu_priv.h
new file mode 100644
index 00000000000..4af361d83b5
--- /dev/null
+++ b/drivers/gpu/drm/omap/omap_gpu_priv.h
@@ -0,0 +1,80 @@
+/*
+ * linux/drivers/gpu/drm/omap/omap_gpu_priv.h
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_GPU_PRIV_H__
+#define __OMAP_GPU_PRIV_H__
+
+#include <plat/display.h>
+#include <linux/module.h>
+
+#define DBG(fmt,...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt,...) do { } while (0) /* verbose debug */
+
+#define MODULE_NAME "omap_gpu"
+
+struct omap_gpu_private {
+ int num_crtcs;
+ struct drm_crtc *crtcs[8];
+ int num_encoders;
+ struct drm_encoder *encoders[8];
+ int num_connectors;
+ struct drm_connector *connectors[8];
+
+ struct drm_fb_helper *fbdev;
+
+ /* for now, we statically create a single framebuffer per device, since
+ * there is not yet any good way to dynamically allocate/free contiguous
+ * memory..
+ */
+ struct drm_framebuffer *fb;
+};
+
+struct drm_fb_helper * omap_fbdev_init(struct drm_device *dev);
+void omap_fbdev_update(struct drm_fb_helper *helper,
+ struct drm_framebuffer *fb);
+
+struct drm_crtc * omap_crtc_init(struct drm_device *dev,
+ struct omap_overlay *ovl);
+struct omap_overlay * omap_crtc_get_overlay(struct drm_crtc *crtc);
+
+struct drm_encoder * omap_encoder_init(struct drm_device *dev,
+ struct omap_overlay_manager *mgr);
+struct omap_overlay_manager * omap_encoder_get_manager(
+ struct drm_encoder *encoder);
+struct drm_encoder * omap_connector_attached_encoder (
+ struct drm_connector *connector);
+enum drm_connector_status omap_connector_detect(
+ struct drm_connector *connector, bool force);
+
+struct drm_connector * omap_connector_init(struct drm_device *dev,
+ int connector_type, struct omap_dss_device *dssdev);
+void omap_connector_mode_set(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+void omap_connector_flush(struct drm_connector *connector,
+ int x, int y, int w, int h);
+void omap_connector_dpms(struct drm_connector *connector, int mode);
+
+struct drm_framebuffer * omap_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd *mode_cmd);
+struct drm_framebuffer * omap_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd *mode_cmd);
+int omap_framebuffer_get_buffer(struct drm_framebuffer *fb, int x, int y,
+ void **vaddr, unsigned long *paddr, int *screen_width);
+
+#endif /* __OMAP_GPU_PRIV_H__ */
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 297bc9a7d6e..cb3d89575b3 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -941,6 +941,16 @@ config SENSORS_TMP421
This driver can also be built as a module. If so, the module
will be called tmp421.
+config SENSORS_TWL4030_MADC
+ tristate "Texas Instruments TWL4030 MADC Hwmon"
+ depends on TWL4030_MADC
+ help
+ If you say yes here you get hwmon support for the Triton
+ TWL4030 MADC.
+
+ This driver can also be built as a module. If so it will be called
+ twl4030-madc-hwmon.
+
config SENSORS_VIA_CPUTEMP
tristate "VIA CPU temperature sensor"
depends on X86
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index dde02d99c23..bc7d740886f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
obj-$(CONFIG_SENSORS_TMP102) += tmp102.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
+obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
obj-$(CONFIG_SENSORS_VT1211) += vt1211.o
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index 1c8b3d9e205..fea292d4340 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -32,7 +32,7 @@ struct jz4740_hwmon {
int irq;
- struct mfd_cell *cell;
+ const struct mfd_cell *cell;
struct device *hwmon;
struct completion read_completion;
@@ -112,7 +112,7 @@ static int __devinit jz4740_hwmon_probe(struct platform_device *pdev)
return -ENOMEM;
}
- hwmon->cell = pdev->dev.platform_data;
+ hwmon->cell = mfd_get_cell(pdev);
hwmon->irq = platform_get_irq(pdev, 0);
if (hwmon->irq < 0) {
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
new file mode 100644
index 00000000000..97e22bef85a
--- /dev/null
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -0,0 +1,157 @@
+/*
+ *
+ * TWL4030 MADC hwmon driver. This driver monitors the real-time
+ * conversion of analog signals such as battery temperature,
+ * battery type, battery level etc. The user can request a conversion
+ * on a particular channel through the sysfs nodes.
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * J Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/i2c/twl.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/i2c/twl4030-madc.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/stddef.h>
+#include <linux/sysfs.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+/*
+ * sysfs hook function
+ */
+static ssize_t madc_read(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct twl4030_madc_request req;
+ long val;
+
+ req.channels = (1 << attr->index);
+ req.method = TWL4030_MADC_SW2;
+ req.func_cb = NULL;
+ val = twl4030_madc_conversion(&req);
+ if (val < 0)
+ return val;
+
+ return sprintf(buf, "%d\n", req.rbuf[attr->index]);
+}
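+
+/* e.g. a userspace read of in0_input (channel 0) triggers a one-shot SW2
+ * conversion and returns the value reported back by the twl4030-madc core;
+ * the exact sysfs path depends on where the hwmon device gets registered.
+ */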
+
+/* sysfs nodes to read individual channels from user side */
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, madc_read, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, madc_read, NULL, 1);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, madc_read, NULL, 2);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, madc_read, NULL, 3);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, madc_read, NULL, 4);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, madc_read, NULL, 5);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, madc_read, NULL, 6);
+static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, madc_read, NULL, 7);
+static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, madc_read, NULL, 8);
+static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, madc_read, NULL, 9);
+static SENSOR_DEVICE_ATTR(curr10_input, S_IRUGO, madc_read, NULL, 10);
+static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, madc_read, NULL, 11);
+static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, madc_read, NULL, 12);
+static SENSOR_DEVICE_ATTR(in15_input, S_IRUGO, madc_read, NULL, 15);
+
+static struct attribute *twl4030_madc_attributes[] = {
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in7_input.dev_attr.attr,
+ &sensor_dev_attr_in8_input.dev_attr.attr,
+ &sensor_dev_attr_in9_input.dev_attr.attr,
+ &sensor_dev_attr_curr10_input.dev_attr.attr,
+ &sensor_dev_attr_in11_input.dev_attr.attr,
+ &sensor_dev_attr_in12_input.dev_attr.attr,
+ &sensor_dev_attr_in15_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group twl4030_madc_group = {
+ .attrs = twl4030_madc_attributes,
+};
+
+static int __devinit twl4030_madc_hwmon_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device *hwmon;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &twl4030_madc_group);
+ if (ret)
+ goto err_sysfs;
+ hwmon = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(hwmon)) {
+ dev_err(&pdev->dev, "hwmon_device_register failed.\n");
+ ret = PTR_ERR(hwmon);
+ goto err_reg;
+ }
+
+ return 0;
+
+err_reg:
+ sysfs_remove_group(&pdev->dev.kobj, &twl4030_madc_group);
+err_sysfs:
+
+ return ret;
+}
+
+static int __devexit twl4030_madc_hwmon_remove(struct platform_device *pdev)
+{
+ hwmon_device_unregister(&pdev->dev);
+ sysfs_remove_group(&pdev->dev.kobj, &twl4030_madc_group);
+
+ return 0;
+}
+
+static struct platform_driver twl4030_madc_hwmon_driver = {
+ .probe = twl4030_madc_hwmon_probe,
+ .remove = __devexit_p(twl4030_madc_hwmon_remove),
+ .driver = {
+ .name = "twl4030_madc_hwmon",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init twl4030_madc_hwmon_init(void)
+{
+ return platform_driver_register(&twl4030_madc_hwmon_driver);
+}
+
+module_init(twl4030_madc_hwmon_init);
+
+static void __exit twl4030_madc_hwmon_exit(void)
+{
+ platform_driver_unregister(&twl4030_madc_hwmon_driver);
+}
+
+module_exit(twl4030_madc_hwmon_exit);
+
+MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("J Keerthy");
+MODULE_ALIAS("twl4030_madc_hwmon");
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
new file mode 100644
index 00000000000..1f29bab6b3e
--- /dev/null
+++ b/drivers/hwspinlock/Kconfig
@@ -0,0 +1,23 @@
+#
+# Generic HWSPINLOCK framework
+#
+
+config HWSPINLOCK
+ tristate "Generic Hardware Spinlock framework"
+ depends on ARCH_OMAP4
+ help
+ Say y here to support the generic hardware spinlock framework.
+ You only need to enable this if you have a hardware spinlock module
+ on your system (usually only relevant if your system has remote slave
+ coprocessors).
+
+ If unsure, say N.
+
+config HWSPINLOCK_OMAP
+ tristate "OMAP Hardware Spinlock device"
+ depends on HWSPINLOCK && ARCH_OMAP4
+ help
+ Say y here to support the OMAP Hardware Spinlock device (first
+ introduced in OMAP4).
+
+ If unsure, say N.
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
new file mode 100644
index 00000000000..5729a3f7ed3
--- /dev/null
+++ b/drivers/hwspinlock/Makefile
@@ -0,0 +1,6 @@
+#
+# Generic Hardware Spinlock framework
+#
+
+obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
+obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
new file mode 100644
index 00000000000..43a62714b4f
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -0,0 +1,548 @@
+/*
+ * Hardware spinlock framework
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/radix-tree.h>
+#include <linux/hwspinlock.h>
+#include <linux/pm_runtime.h>
+
+#include "hwspinlock_internal.h"
+
+/* radix tree tags */
+#define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */
+
+/*
+ * A radix tree is used to maintain the available hwspinlock instances.
+ * The tree associates hwspinlock pointers with their integer key id,
+ * and provides easy-to-use API which makes the hwspinlock core code simple
+ * and easy to read.
+ *
+ * Radix trees are quick on lookups, and reasonably efficient in terms of
+ * storage, especially with high density usages such as this framework
+ * requires (a continuous range of integer keys, beginning with zero, is
+ * used as the IDs of the hwspinlock instances).
+ *
+ * The radix tree API supports tagging items in the tree, which this
+ * framework uses to mark unused hwspinlock instances (see the
+ * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
+ * tree, looking for an unused hwspinlock instance, is now reduced to a
+ * single radix tree API call.
+ */
+static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
+
+/*
+ * Synchronization of access to the tree is achieved using this spinlock,
+ * as the radix-tree API requires that users provide all synchronisation.
+ */
+static DEFINE_SPINLOCK(hwspinlock_tree_lock);
+
+/**
+ * __hwspin_trylock() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ * @mode: controls whether local interrupts are disabled or not
+ * @flags: a pointer where the caller's interrupt state will be saved at (if
+ * requested)
+ *
+ * This function attempts to lock an hwspinlock, and will immediately
+ * fail if the hwspinlock is already taken.
+ *
+ * Upon a successful return from this function, preemption (and possibly
+ * interrupts) is disabled, so the caller must not sleep, and is advised to
+ * release the hwspinlock as soon as possible. This is required in order to
+ * minimize remote cores polling on the hardware interconnect.
+ *
+ * The user decides whether local interrupts are disabled or not, and if yes,
+ * whether he wants their previous state to be saved. It is up to the user
+ * to choose the appropriate @mode of operation, exactly the same way users
+ * should decide between spin_trylock, spin_trylock_irq and
+ * spin_trylock_irqsave.
+ *
+ * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
+ * the hwspinlock was already taken.
+ * This function will never sleep.
+ */
+int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+{
+ int ret;
+
+ BUG_ON(!hwlock);
+ BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+
+ /*
+ * This spin_lock{_irq, _irqsave} serves three purposes:
+ *
+ * 1. Disable preemption, in order to minimize the period of time
+ * in which the hwspinlock is taken. This is important in order
+ * to minimize the possible polling on the hardware interconnect
+ * by a remote user of this lock.
+ * 2. Make the hwspinlock SMP-safe (so we can take it from
+ * additional contexts on the local host).
+ * 3. Ensure that in_atomic/might_sleep checks catch potential
+ * problems with hwspinlock usage (e.g. scheduler checks like
+ * 'scheduling while atomic' etc.)
+ */
+ if (mode == HWLOCK_IRQSTATE)
+ ret = spin_trylock_irqsave(&hwlock->lock, *flags);
+ else if (mode == HWLOCK_IRQ)
+ ret = spin_trylock_irq(&hwlock->lock);
+ else
+ ret = spin_trylock(&hwlock->lock);
+
+ /* is lock already taken by another context on the local cpu ? */
+ if (!ret)
+ return -EBUSY;
+
+ /* try to take the hwspinlock device */
+ ret = hwlock->ops->trylock(hwlock);
+
+ /* if hwlock is already taken, undo spin_trylock_* and exit */
+ if (!ret) {
+ if (mode == HWLOCK_IRQSTATE)
+ spin_unlock_irqrestore(&hwlock->lock, *flags);
+ else if (mode == HWLOCK_IRQ)
+ spin_unlock_irq(&hwlock->lock);
+ else
+ spin_unlock(&hwlock->lock);
+
+ return -EBUSY;
+ }
+
+ /*
+ * We can be sure the other core's memory operations
+ * are observable to us only _after_ we successfully take
+ * the hwspinlock, and we must make sure that subsequent memory
+ * operations (both reads and writes) will not be reordered before
+ * we actually took the hwspinlock.
+ *
+ * Note: the implicit memory barrier of the spinlock above is too
+ * early, so we need this additional explicit memory barrier.
+ */
+ mb();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__hwspin_trylock);
+
+/**
+ * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @timeout: timeout value in msecs
+ * @mode: mode which controls whether local interrupts are disabled or not
+ * @flags: a pointer to where the caller's interrupt state will be saved at (if
+ * requested)
+ *
+ * This function locks the given @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up after @timeout msecs have elapsed.
+ *
+ * Upon a successful return from this function, preemption is disabled
+ * (and possibly local interrupts, too), so the caller must not sleep,
+ * and is advised to release the hwspinlock as soon as possible.
+ * This is required in order to minimize remote cores polling on the
+ * hardware interconnect.
+ *
+ * The user decides whether local interrupts are disabled or not, and if yes,
+ * whether he wants their previous state to be saved. It is up to the user
+ * to choose the appropriate @mode of operation, exactly the same way users
+ * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
+ * busy after @timeout msecs). The function will never sleep.
+ */
+int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
+ int mode, unsigned long *flags)
+{
+ int ret;
+ unsigned long expire;
+
+ expire = msecs_to_jiffies(to) + jiffies;
+
+ for (;;) {
+ /* Try to take the hwspinlock */
+ ret = __hwspin_trylock(hwlock, mode, flags);
+ if (ret != -EBUSY)
+ break;
+
+ /*
+ * The lock is already taken, let's check if the user wants
+ * us to try again
+ */
+ if (time_is_before_eq_jiffies(expire))
+ return -ETIMEDOUT;
+
+ /*
+ * Allow platform-specific relax handlers to prevent
+ * hogging the interconnect (no sleeping, though)
+ */
+ if (hwlock->ops->relax)
+ hwlock->ops->relax(hwlock);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
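
[Editor's sketch, not part of the patch: the timeout path as a driver would use it, assuming the hwspin_lock_timeout_irqsave()/hwspin_unlock_irqrestore() wrappers expected in <linux/hwspinlock.h>. The shared counter register is an illustrative assumption.]

#include <linux/hwspinlock.h>
#include <linux/io.h>

static int update_shared_counter(struct hwspinlock *hwlock, u32 __iomem *cnt)
{
	unsigned long flags;
	int ret;

	/* spin for at most 10 msecs, with local interrupts disabled */
	ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
	if (ret)
		return ret;	/* -ETIMEDOUT if the remote core held it too long */

	/* critical section shared with the remote core; keep it short */
	writel(readl(cnt) + 1, cnt);

	hwspin_unlock_irqrestore(hwlock, &flags);
	return 0;
}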
+
+/**
+ * __hwspin_unlock() - unlock a specific hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ * @mode: controls whether local interrupts needs to be restored or not
+ * @flags: previous caller's interrupt state to restore (if requested)
+ *
+ * This function will unlock a specific hwspinlock, enable preemption and
+ * (possibly) enable interrupts or restore their previous state.
+ * @hwlock must be already locked before calling this function: it is a bug
+ * to call unlock on a @hwlock that is already unlocked.
+ *
+ * The user decides whether local interrupts should be enabled or not, and
+ * if yes, whether he wants their previous state to be restored. It is up
+ * to the user to choose the appropriate @mode of operation, exactly the
+ * same way users decide between spin_unlock, spin_unlock_irq and
+ * spin_unlock_irqrestore.
+ *
+ * The function will never sleep.
+ */
+void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+{
+ BUG_ON(!hwlock);
+ BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+
+ /*
+ * We must make sure that memory operations (both reads and writes),
+ * done before unlocking the hwspinlock, will not be reordered
+ * after the lock is released.
+ *
+ * That's the purpose of this explicit memory barrier.
+ *
+ * Note: the memory barrier induced by the spin_unlock below is too
+ * late; the other core is going to access memory soon after it will
+ * take the hwspinlock, and by then we want to be sure our memory
+ * operations are already observable.
+ */
+ mb();
+
+ hwlock->ops->unlock(hwlock);
+
+ /* Undo the spin_trylock{_irq, _irqsave} called while locking */
+ if (mode == HWLOCK_IRQSTATE)
+ spin_unlock_irqrestore(&hwlock->lock, *flags);
+ else if (mode == HWLOCK_IRQ)
+ spin_unlock_irq(&hwlock->lock);
+ else
+ spin_unlock(&hwlock->lock);
+}
+EXPORT_SYMBOL_GPL(__hwspin_unlock);
+
+/**
+ * hwspin_lock_register() - register a new hw spinlock
+ * @hwlock: hwspinlock to register.
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to register a new hwspinlock instance.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context.
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_register(struct hwspinlock *hwlock)
+{
+ struct hwspinlock *tmp;
+ int ret;
+
+ if (!hwlock || !hwlock->ops ||
+ !hwlock->ops->trylock || !hwlock->ops->unlock) {
+ pr_err("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ spin_lock_init(&hwlock->lock);
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
+ if (ret)
+ goto out;
+
+ /* mark this hwspinlock as available */
+ tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
+ HWSPINLOCK_UNUSED);
+
+ /* self-sanity check which should never fail */
+ WARN_ON(tmp != hwlock);
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_register);
+
+/**
+ * hwspin_lock_unregister() - unregister an hw spinlock
+ * @id: index of the specific hwspinlock to unregister
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to unregister an existing (and unused) hwspinlock.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context.
+ *
+ * Returns the address of hwspinlock @id on success, or NULL on failure
+ */
+struct hwspinlock *hwspin_lock_unregister(unsigned int id)
+{
+ struct hwspinlock *hwlock = NULL;
+ int ret;
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ /* make sure the hwspinlock is not in use (tag is set) */
+ ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
+ if (ret == 0) {
+ pr_err("hwspinlock %d still in use (or not present)\n", id);
+ goto out;
+ }
+
+ hwlock = radix_tree_delete(&hwspinlock_tree, id);
+ if (!hwlock) {
+ pr_err("failed to delete hwspinlock %d\n", id);
+ goto out;
+ }
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
+
+/**
+ * __hwspin_lock_request() - tag an hwspinlock as used and power it up
+ *
+ * This is an internal function that prepares an hwspinlock instance
+ * before it is given to the user. The function assumes that
+ * hwspinlock_tree_lock is taken.
+ *
+ * Returns 0 or positive to indicate success, and a negative value to
+ * indicate an error (with the appropriate error code)
+ */
+static int __hwspin_lock_request(struct hwspinlock *hwlock)
+{
+ struct hwspinlock *tmp;
+ int ret;
+
+ /* prevent underlying implementation from being removed */
+ if (!try_module_get(hwlock->owner)) {
+ dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
+ return -EINVAL;
+ }
+
+ /* notify PM core that power is now needed */
+ ret = pm_runtime_get_sync(hwlock->dev);
+ if (ret < 0) {
+ dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
+ return ret;
+ }
+
+ /* mark hwspinlock as used, should not fail */
+ tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
+ HWSPINLOCK_UNUSED);
+
+ /* self-sanity check that should never fail */
+ WARN_ON(tmp != hwlock);
+
+ return ret;
+}
+
+/**
+ * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
+ * @hwlock: a valid hwspinlock instance
+ *
+ * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
+ */
+int hwspin_lock_get_id(struct hwspinlock *hwlock)
+{
+ if (!hwlock) {
+ pr_err("invalid hwlock\n");
+ return -EINVAL;
+ }
+
+ return hwlock->id;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
+
+/**
+ * hwspin_lock_request() - request an hwspinlock
+ *
+ * This function should be called by users of the hwspinlock device,
+ * in order to dynamically assign them an unused hwspinlock.
+ * Usually the user of this lock will then have to communicate the lock's id
+ * to the remote core before it can be used for synchronization (to get the
+ * id of a given hwlock, use hwspin_lock_get_id()).
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns the address of the assigned hwspinlock, or NULL on error
+ */
+struct hwspinlock *hwspin_lock_request(void)
+{
+ struct hwspinlock *hwlock;
+ int ret;
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ /* look for an unused lock */
+ ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
+ 0, 1, HWSPINLOCK_UNUSED);
+ if (ret == 0) {
+ pr_warn("a free hwspinlock is not available\n");
+ hwlock = NULL;
+ goto out;
+ }
+
+ /* sanity check that should never fail */
+ WARN_ON(ret > 1);
+
+ /* mark as used and power up */
+ ret = __hwspin_lock_request(hwlock);
+ if (ret < 0)
+ hwlock = NULL;
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_request);
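
[Editor's sketch, not part of the patch: the dynamic-allocation flow described above. share_id_with_remote() is a hypothetical, platform-specific transport (mailbox, shared memory, ...) and not part of this API.]

#include <linux/hwspinlock.h>

static struct hwspinlock *my_hwlock;

static int my_ipc_setup(void)
{
	int id, ret;

	my_hwlock = hwspin_lock_request();
	if (!my_hwlock)
		return -EBUSY;

	/* the remote core needs the lock's id before both sides can use it */
	id = hwspin_lock_get_id(my_hwlock);
	ret = share_id_with_remote(id);		/* hypothetical helper */
	if (ret)
		hwspin_lock_free(my_hwlock);	/* give the lock back on failure */

	return ret;
}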
+
+/**
+ * hwspin_lock_request_specific() - request for a specific hwspinlock
+ * @id: index of the specific hwspinlock that is requested
+ *
+ * This function should be called by users of the hwspinlock module,
+ * in order to assign them a specific hwspinlock.
+ * Usually early board code will be calling this function in order to
+ * reserve specific hwspinlock ids for predefined purposes.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns the address of the assigned hwspinlock, or NULL on error
+ */
+struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
+{
+ struct hwspinlock *hwlock;
+ int ret;
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ /* make sure this hwspinlock exists */
+ hwlock = radix_tree_lookup(&hwspinlock_tree, id);
+ if (!hwlock) {
+ pr_warn("hwspinlock %u does not exist\n", id);
+ goto out;
+ }
+
+ /* sanity check (this shouldn't happen) */
+ WARN_ON(hwlock->id != id);
+
+ /* make sure this hwspinlock is unused */
+ ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
+ if (ret == 0) {
+ pr_warn("hwspinlock %u is already in use\n", id);
+ hwlock = NULL;
+ goto out;
+ }
+
+ /* mark as used and power up */
+ ret = __hwspin_lock_request(hwlock);
+ if (ret < 0)
+ hwlock = NULL;
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
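
[Editor's sketch, not part of the patch: the "early board code" case mentioned above, assuming the platform has set aside lock id 0 for a predefined purpose such as guarding IPC ring descriptors.]

#include <linux/hwspinlock.h>
#include <linux/init.h>

static struct hwspinlock *ipc_ring_lock;

/* board init code: reserve hwspinlock 0 before any dynamic user can grab it */
static int __init board_reserve_ipc_hwlock(void)
{
	ipc_ring_lock = hwspin_lock_request_specific(0);
	if (!ipc_ring_lock)
		return -EBUSY;	/* lock missing or already claimed */

	return 0;
}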
+
+/**
+ * hwspin_lock_free() - free a specific hwspinlock
+ * @hwlock: the specific hwspinlock to free
+ *
+ * This function marks @hwlock as free again.
+ * Should only be called with an @hwlock that was retrieved from
+ * an earlier call to hwspin_lock_request{_specific}.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_free(struct hwspinlock *hwlock)
+{
+ struct hwspinlock *tmp;
+ int ret;
+
+ if (!hwlock) {
+ pr_err("invalid hwlock\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&hwspinlock_tree_lock);
+
+ /* make sure the hwspinlock is used */
+ ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
+ HWSPINLOCK_UNUSED);
+ if (ret == 1) {
+ dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
+ dump_stack();
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* notify the underlying device that power is not needed */
+ ret = pm_runtime_put(hwlock->dev);
+ if (ret < 0)
+ goto out;
+
+ /* mark this hwspinlock as available */
+ tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
+ HWSPINLOCK_UNUSED);
+
+ /* sanity check (this shouldn't happen) */
+ WARN_ON(tmp != hwlock);
+
+ module_put(hwlock->owner);
+
+out:
+ spin_unlock(&hwspinlock_tree_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_free);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock interface");
+MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
new file mode 100644
index 00000000000..69935e6b93e
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_internal.h
@@ -0,0 +1,61 @@
+/*
+ * Hardware spinlocks internal header
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HWSPINLOCK_HWSPINLOCK_H
+#define __HWSPINLOCK_HWSPINLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/device.h>
+
+/**
+ * struct hwspinlock_ops - platform-specific hwspinlock handlers
+ *
+ * @trylock: make a single attempt to take the lock. returns 0 on
+ * failure and non-zero on success. may _not_ sleep.
+ * @unlock: release the lock. always succeeds. may _not_ sleep.
+ * @relax: optional, platform-specific relax handler, called by hwspinlock
+ * core while spinning on a lock, between two successive
+ * invocations of @trylock. may _not_ sleep.
+ */
+struct hwspinlock_ops {
+ int (*trylock)(struct hwspinlock *lock);
+ void (*unlock)(struct hwspinlock *lock);
+ void (*relax)(struct hwspinlock *lock);
+};
+
+/**
+ * struct hwspinlock - this struct represents a single hwspinlock instance
+ *
+ * @dev: underlying device, will be used to invoke runtime PM api
+ * @ops: platform-specific hwspinlock handlers
+ * @id: a global, unique, system-wide index of the lock.
+ * @lock: initialized and used by hwspinlock core
+ * @owner: underlying implementation module, used to maintain module ref count
+ *
+ * Note: we currently opt for simplicity; later we can save some memory
+ * by grouping the dev, ops and owner members in a single per-platform
+ * struct, and having all hwspinlocks point at it.
+ */
+struct hwspinlock {
+ struct device *dev;
+ const struct hwspinlock_ops *ops;
+ int id;
+ spinlock_t lock;
+ struct module *owner;
+};
+
+#endif /* __HWSPINLOCK_HWSPINLOCK_H */
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
new file mode 100644
index 00000000000..a8f02734c02
--- /dev/null
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -0,0 +1,231 @@
+/*
+ * OMAP hardware spinlock driver
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Simon Que <sque@ti.com>
+ * Hari Kanigeri <h-kanigeri2@ti.com>
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/hwspinlock.h>
+#include <linux/platform_device.h>
+
+#include "hwspinlock_internal.h"
+
+/* Spinlock register offsets */
+#define SYSSTATUS_OFFSET 0x0014
+#define LOCK_BASE_OFFSET 0x0800
+
+#define SPINLOCK_NUMLOCKS_BIT_OFFSET (24)
+
+/* Possible values of SPINLOCK_LOCK_REG */
+#define SPINLOCK_NOTTAKEN (0) /* free */
+#define SPINLOCK_TAKEN (1) /* locked */
+
+#define to_omap_hwspinlock(lock) \
+ container_of(lock, struct omap_hwspinlock, lock)
+
+struct omap_hwspinlock {
+ struct hwspinlock lock;
+ void __iomem *addr;
+};
+
+struct omap_hwspinlock_state {
+ int num_locks; /* Total number of locks in system */
+ void __iomem *io_base; /* Mapped base address */
+};
+
+static int omap_hwspinlock_trylock(struct hwspinlock *lock)
+{
+ struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
+
+ /* attempt to acquire the lock by reading its value */
+ return (SPINLOCK_NOTTAKEN == readl(omap_lock->addr));
+}
+
+static void omap_hwspinlock_unlock(struct hwspinlock *lock)
+{
+ struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
+
+ /* release the lock by writing 0 to it */
+ writel(SPINLOCK_NOTTAKEN, omap_lock->addr);
+}
+
+/*
+ * relax the OMAP interconnect while spinning on it.
+ *
+ * The specs recommend a retry delay of just over half of the
+ * time that a requester is expected to hold the lock.
+ *
+ * The number below is taken from a hardware specs example, so
+ * it is somewhat arbitrary.
+ */
+static void omap_hwspinlock_relax(struct hwspinlock *lock)
+{
+ ndelay(50);
+}
+
+static const struct hwspinlock_ops omap_hwspinlock_ops = {
+ .trylock = omap_hwspinlock_trylock,
+ .unlock = omap_hwspinlock_unlock,
+ .relax = omap_hwspinlock_relax,
+};
+
+static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
+{
+ struct omap_hwspinlock *omap_lock;
+ struct omap_hwspinlock_state *state;
+ struct hwspinlock *lock;
+ struct resource *res;
+ void __iomem *io_base;
+ int i, ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ io_base = ioremap(res->start, resource_size(res));
+ if (!io_base) {
+ ret = -ENOMEM;
+ goto free_state;
+ }
+
+ /* Determine number of locks */
+ i = readl(io_base + SYSSTATUS_OFFSET);
+ i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET;
+
+ /* one of the four lsb's must be set, and nothing else */
+ if (hweight_long(i & 0xf) != 1 || i > 8) {
+ ret = -EINVAL;
+ goto iounmap_base;
+ }
+
+ state->num_locks = i * 32;
+ state->io_base = io_base;
+
+ platform_set_drvdata(pdev, state);
+
+ /*
+ * runtime PM will make sure the clock of this module is
+ * enabled iff at least one lock is requested
+ */
+ pm_runtime_enable(&pdev->dev);
+
+ for (i = 0; i < state->num_locks; i++) {
+ omap_lock = kzalloc(sizeof(*omap_lock), GFP_KERNEL);
+ if (!omap_lock) {
+ ret = -ENOMEM;
+ goto free_locks;
+ }
+
+ omap_lock->lock.dev = &pdev->dev;
+ omap_lock->lock.owner = THIS_MODULE;
+ omap_lock->lock.id = i;
+ omap_lock->lock.ops = &omap_hwspinlock_ops;
+ omap_lock->addr = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
+
+ ret = hwspin_lock_register(&omap_lock->lock);
+ if (ret) {
+ kfree(omap_lock);
+ goto free_locks;
+ }
+ }
+
+ return 0;
+
+free_locks:
+ while (--i >= 0) {
+ lock = hwspin_lock_unregister(i);
+ /* this shouldn't happen, but let's give it our best effort */
+ if (!lock) {
+ dev_err(&pdev->dev, "%s: cleanups failed\n", __func__);
+ continue;
+ }
+ omap_lock = to_omap_hwspinlock(lock);
+ kfree(omap_lock);
+ }
+ pm_runtime_disable(&pdev->dev);
+iounmap_base:
+ iounmap(io_base);
+free_state:
+ kfree(state);
+ return ret;
+}
+
+static int omap_hwspinlock_remove(struct platform_device *pdev)
+{
+ struct omap_hwspinlock_state *state = platform_get_drvdata(pdev);
+ struct hwspinlock *lock;
+ struct omap_hwspinlock *omap_lock;
+ int i;
+
+ for (i = 0; i < state->num_locks; i++) {
+ lock = hwspin_lock_unregister(i);
+ /* this shouldn't happen at this point. if it does, at least
+ * don't continue with the remove */
+ if (!lock) {
+ dev_err(&pdev->dev, "%s: failed on %d\n", __func__, i);
+ return -EBUSY;
+ }
+
+ omap_lock = to_omap_hwspinlock(lock);
+ kfree(omap_lock);
+ }
+
+ pm_runtime_disable(&pdev->dev);
+ iounmap(state->io_base);
+ kfree(state);
+
+ return 0;
+}
+
+static struct platform_driver omap_hwspinlock_driver = {
+ .probe = omap_hwspinlock_probe,
+ .remove = omap_hwspinlock_remove,
+ .driver = {
+ .name = "omap_hwspinlock",
+ },
+};
+
+static int __init omap_hwspinlock_init(void)
+{
+ return platform_driver_register(&omap_hwspinlock_driver);
+}
+/* board init code might need to reserve hwspinlocks for predefined purposes */
+postcore_initcall(omap_hwspinlock_init);
+
+static void __exit omap_hwspinlock_exit(void)
+{
+ platform_driver_unregister(&omap_hwspinlock_driver);
+}
+module_exit(omap_hwspinlock_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock driver for OMAP");
+MODULE_AUTHOR("Simon Que <sque@ti.com>");
+MODULE_AUTHOR("Hari Kanigeri <h-kanigeri2@ti.com>");
+MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 61653f07967..fee1a261386 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -49,6 +49,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
@@ -305,7 +306,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
return -EIO;
}
- pdata = pdev->dev.platform_data;
+ pdata = mfd_get_data(pdev);
if (pdata) {
i2c->regstep = pdata->regstep;
i2c->clock_khz = pdata->clock_khz;
@@ -330,9 +331,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
i2c->adap = ocores_adapter;
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.dev.parent = &pdev->dev;
-#ifdef CONFIG_OF
i2c->adap.dev.of_node = pdev->dev.of_node;
-#endif
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);
@@ -390,15 +389,11 @@ static int ocores_i2c_resume(struct platform_device *pdev)
#define ocores_i2c_resume NULL
#endif
-#ifdef CONFIG_OF
static struct of_device_id ocores_i2c_match[] = {
- {
- .compatible = "opencores,i2c-ocores",
- },
- {},
+ { .compatible = "opencores,i2c-ocores", },
+ {},
};
MODULE_DEVICE_TABLE(of, ocores_i2c_match);
-#endif
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:ocores-i2c");
@@ -411,9 +406,7 @@ static struct platform_driver ocores_i2c_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "ocores-i2c",
-#ifdef CONFIG_OF
- .of_match_table = ocores_i2c_match,
-#endif
+ .of_match_table = ocores_i2c_match,
},
};
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 58a58c7eaa1..d53cd61c059 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -42,12 +42,12 @@
#include <linux/pm_runtime.h>
/* I2C controller revisions */
-#define OMAP_I2C_REV_2 0x20
+#define OMAP_I2C_OMAP1_REV_2 0x20
/* I2C controller revisions present on specific hardware */
#define OMAP_I2C_REV_ON_2430 0x36
#define OMAP_I2C_REV_ON_3430 0x3C
-#define OMAP_I2C_REV_ON_4430 0x40
+#define OMAP_I2C_REV_ON_3530_4430 0x40
/* timeout waiting for the controller to respond */
#define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000))
@@ -72,11 +72,12 @@ enum {
OMAP_I2C_SCLH_REG,
OMAP_I2C_SYSTEST_REG,
OMAP_I2C_BUFSTAT_REG,
- OMAP_I2C_REVNB_LO,
- OMAP_I2C_REVNB_HI,
- OMAP_I2C_IRQSTATUS_RAW,
- OMAP_I2C_IRQENABLE_SET,
- OMAP_I2C_IRQENABLE_CLR,
+ /* only on OMAP4430 */
+ OMAP_I2C_IP_V2_REVNB_LO,
+ OMAP_I2C_IP_V2_REVNB_HI,
+ OMAP_I2C_IP_V2_IRQSTATUS_RAW,
+ OMAP_I2C_IP_V2_IRQENABLE_SET,
+ OMAP_I2C_IP_V2_IRQENABLE_CLR,
};
/* I2C Interrupt Enable Register (OMAP_I2C_IE): */
@@ -204,7 +205,7 @@ struct omap_i2c_dev {
u16 errata;
};
-const static u8 reg_map[] = {
+const static u8 reg_map_ip_v1[] = {
[OMAP_I2C_REV_REG] = 0x00,
[OMAP_I2C_IE_REG] = 0x01,
[OMAP_I2C_STAT_REG] = 0x02,
@@ -225,7 +226,7 @@ const static u8 reg_map[] = {
[OMAP_I2C_BUFSTAT_REG] = 0x10,
};
-const static u8 omap4_reg_map[] = {
+const static u8 reg_map_ip_v2[] = {
[OMAP_I2C_REV_REG] = 0x04,
[OMAP_I2C_IE_REG] = 0x2c,
[OMAP_I2C_STAT_REG] = 0x28,
@@ -244,11 +245,11 @@ const static u8 omap4_reg_map[] = {
[OMAP_I2C_SCLH_REG] = 0xb8,
[OMAP_I2C_SYSTEST_REG] = 0xbC,
[OMAP_I2C_BUFSTAT_REG] = 0xc0,
- [OMAP_I2C_REVNB_LO] = 0x00,
- [OMAP_I2C_REVNB_HI] = 0x04,
- [OMAP_I2C_IRQSTATUS_RAW] = 0x24,
- [OMAP_I2C_IRQENABLE_SET] = 0x2c,
- [OMAP_I2C_IRQENABLE_CLR] = 0x30,
+ [OMAP_I2C_IP_V2_REVNB_LO] = 0x00,
+ [OMAP_I2C_IP_V2_REVNB_HI] = 0x04,
+ [OMAP_I2C_IP_V2_IRQSTATUS_RAW] = 0x24,
+ [OMAP_I2C_IP_V2_IRQENABLE_SET] = 0x2c,
+ [OMAP_I2C_IP_V2_IRQENABLE_CLR] = 0x30,
};
static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev,
@@ -276,7 +277,7 @@ static void omap_i2c_unidle(struct omap_i2c_dev *dev)
pm_runtime_get_sync(&pdev->dev);
- if (cpu_is_omap34xx()) {
+ if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
@@ -308,12 +309,12 @@ static void omap_i2c_idle(struct omap_i2c_dev *dev)
pdata = pdev->dev.platform_data;
dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- if (dev->rev >= OMAP_I2C_REV_ON_4430)
- omap_i2c_write_reg(dev, OMAP_I2C_IRQENABLE_CLR, 1);
+ if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+ omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1);
else
omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
- if (dev->rev < OMAP_I2C_REV_2) {
+ if (dev->rev < OMAP_I2C_OMAP1_REV_2) {
iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG); /* Read clears */
} else {
omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, dev->iestate);
@@ -334,8 +335,13 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
unsigned long timeout;
unsigned long internal_clk = 0;
struct clk *fclk;
+ struct platform_device *pdev;
+ struct omap_i2c_bus_platform_data *pdata;
+
+ pdev = to_platform_device(dev->dev);
+ pdata = pdev->dev.platform_data;
- if (dev->rev >= OMAP_I2C_REV_2) {
+ if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
/* Disable I2C controller before soft reset */
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
omap_i2c_read_reg(dev, OMAP_I2C_CON_REG) &
@@ -378,12 +384,14 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* REVISIT: Some wkup sources might not be needed.
*/
dev->westate = OMAP_I2C_WE_ALL;
- omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
+ if (dev->rev < OMAP_I2C_REV_ON_3530_4430)
+ omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
+ dev->westate);
}
}
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
- if (cpu_class_is_omap1()) {
+ if (pdata->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
/*
* The I2C functional clock is the armxor_ck, so there's
* no need to get "armxor_ck" separately. Now, if OMAP2420
@@ -407,7 +415,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
psc = fclk_rate / 12000000;
}
- if (!(cpu_class_is_omap1() || cpu_is_omap2420())) {
+ if (!(pdata->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
/*
* HSI2C controller internal clk rate should be 19.2 Mhz for
@@ -415,7 +423,8 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* to get longer filter period for better noise suppression.
* The filter is iclk (fclk for HS) period.
*/
- if (dev->speed > 400 || cpu_is_omap2430())
+ if (dev->speed > 400 ||
+ pdata->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
internal_clk = 19200;
else if (dev->speed > 100)
internal_clk = 9600;
@@ -484,7 +493,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
dev->errata = 0;
- if (cpu_is_omap2430() || cpu_is_omap34xx())
+ if (pdata->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
dev->errata |= I2C_OMAP_ERRATA_I207;
/* Enable interrupts */
@@ -493,7 +502,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
- if (cpu_is_omap34xx()) {
+ if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
dev->pscstate = psc;
dev->scllstate = scll;
dev->sclhstate = sclh;
@@ -720,7 +729,7 @@ static inline void i2c_omap_errata_i207(struct omap_i2c_dev *dev, u16 stat)
#ifdef CONFIG_ARCH_OMAP15XX
static irqreturn_t
-omap_i2c_rev1_isr(int this_irq, void *dev_id)
+omap_i2c_omap1_isr(int this_irq, void *dev_id)
{
struct omap_i2c_dev *dev = dev_id;
u16 iv, w;
@@ -774,7 +783,7 @@ omap_i2c_rev1_isr(int this_irq, void *dev_id)
return IRQ_HANDLED;
}
#else
-#define omap_i2c_rev1_isr NULL
+#define omap_i2c_omap1_isr NULL
#endif
/*
@@ -813,6 +822,11 @@ omap_i2c_isr(int this_irq, void *dev_id)
u16 bits;
u16 stat, w;
int err, count = 0;
+ struct platform_device *pdev;
+ struct omap_i2c_bus_platform_data *pdata;
+
+ pdev = to_platform_device(dev->dev);
+ pdata = pdev->dev.platform_data;
if (dev->idle)
return IRQ_NONE;
@@ -881,8 +895,8 @@ complete:
* Data reg in 2430, omap3 and
* omap4 is 8 bit wide
*/
- if (cpu_class_is_omap1() ||
- cpu_is_omap2420()) {
+ if (pdata->flags &
+ OMAP_I2C_FLAG_16BIT_DATA_REG) {
if (dev->buf_len) {
*dev->buf++ = w >> 8;
dev->buf_len--;
@@ -924,8 +938,8 @@ complete:
* Data reg in 2430, omap3 and
* omap4 is 8 bit wide
*/
- if (cpu_class_is_omap1() ||
- cpu_is_omap2420()) {
+ if (pdata->flags &
+ OMAP_I2C_FLAG_16BIT_DATA_REG) {
if (dev->buf_len) {
w |= *dev->buf++ << 8;
dev->buf_len--;
@@ -1027,17 +1041,12 @@ omap_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- if (cpu_is_omap7xx())
- dev->reg_shift = 1;
- else if (cpu_is_omap44xx())
- dev->reg_shift = 0;
- else
- dev->reg_shift = 2;
+ dev->reg_shift = (pdata->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
- if (cpu_is_omap44xx())
- dev->regs = (u8 *) omap4_reg_map;
+ if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+ dev->regs = (u8 *)reg_map_ip_v2;
else
- dev->regs = (u8 *) reg_map;
+ dev->regs = (u8 *)reg_map_ip_v1;
pm_runtime_enable(&pdev->dev);
omap_i2c_unidle(dev);
@@ -1047,7 +1056,7 @@ omap_i2c_probe(struct platform_device *pdev)
if (dev->rev <= OMAP_I2C_REV_ON_3430)
dev->errata |= I2C_OMAP3_1P153;
- if (!(cpu_class_is_omap1() || cpu_is_omap2420())) {
+ if (!(pdata->flags & OMAP_I2C_FLAG_NO_FIFO)) {
u16 s;
/* Set up the fifo size - Get total size */
@@ -1059,7 +1068,7 @@ omap_i2c_probe(struct platform_device *pdev)
* size. This is to ensure that we can handle the status on int
* call back latencies.
*/
- if (dev->rev >= OMAP_I2C_REV_ON_4430) {
+ if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
dev->fifo_size = 0;
dev->b_hw = 0; /* Disable hardware fixes */
} else {
@@ -1075,7 +1084,8 @@ omap_i2c_probe(struct platform_device *pdev)
/* reset ASAP, clearing any IRQs */
omap_i2c_init(dev);
- isr = (dev->rev < OMAP_I2C_REV_2) ? omap_i2c_rev1_isr : omap_i2c_isr;
+ isr = (dev->rev < OMAP_I2C_OMAP1_REV_2) ? omap_i2c_omap1_isr :
+ omap_i2c_isr;
r = request_irq(dev->irq, isr, 0, pdev->name, dev);
if (r) {
@@ -1083,8 +1093,8 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_unuse_clocks;
}
- dev_info(dev->dev, "bus %d rev%d.%d at %d kHz\n",
- pdev->id, dev->rev >> 4, dev->rev & 0xf, dev->speed);
+ dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id,
+ pdata->rev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
omap_i2c_idle(dev);
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index a9c419e075a..9fbd7e6fe32 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -34,6 +34,7 @@
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
@@ -704,7 +705,7 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
if (irq < 0)
goto resource_missing;
- pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
+ pdata = mfd_get_data(pdev);
if (!pdata)
return -EINVAL;
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index f0bd5bcdf56..045ba6efea4 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -537,9 +537,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.parent = &client->adapter->dev;
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
-#ifdef CONFIG_OF
client->dev.of_node = info->of_node;
-#endif
dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
client->addr);
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index c976285d313..f7a7d69c3a6 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -27,6 +27,7 @@
#include <linux/debugfs.h>
#include <linux/stop_machine.h>
#include <linux/i7300_idle.h>
+#include <linux/idle.h>
#include <asm/idle.h>
@@ -584,7 +585,7 @@ static int __init i7300_idle_init(void)
}
}
- idle_notifier_register(&i7300_idle_nb);
+ register_idle_notifier(&i7300_idle_nb);
printk(KERN_INFO "i7300_idle: loaded v%s\n", I7300_IDLE_DRIVER_VERSION);
return 0;
@@ -592,7 +593,7 @@ static int __init i7300_idle_init(void)
static void __exit i7300_idle_exit(void)
{
- idle_notifier_unregister(&i7300_idle_nb);
+ unregister_idle_notifier(&i7300_idle_nb);
free_cpumask_var(idle_cpumask);
if (debugfs_dir) {
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 11905b6a302..38930b01cb2 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -218,6 +218,9 @@ static void input_handle_event(struct input_dev *dev,
{
int disposition = INPUT_IGNORE_EVENT;
+ trace_mark(input, input_event,
+ "type %u code %u value %d", type, code, value);
+
switch (type) {
case EV_SYN:
diff --git a/drivers/input/misc/88pm860x_onkey.c b/drivers/input/misc/88pm860x_onkey.c
index 4cc82826ea6..3dca3c14510 100644
--- a/drivers/input/misc/88pm860x_onkey.c
+++ b/drivers/input/misc/88pm860x_onkey.c
@@ -74,7 +74,7 @@ static int __devinit pm860x_onkey_probe(struct platform_device *pdev)
info->chip = chip;
info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
info->dev = &pdev->dev;
- info->irq = irq + chip->irq_base;
+ info->irq = irq;
info->idev = input_allocate_device();
if (!info->idev) {
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 014dd4ad0d4..6a11694e3fc 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -29,6 +29,7 @@
#include <linux/workqueue.h>
#include <linux/i2c/twl.h>
#include <linux/mfd/twl4030-codec.h>
+#include <linux/mfd/core.h>
#include <linux/input.h>
#include <linux/slab.h>
@@ -196,7 +197,7 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
{
- struct twl4030_codec_vibra_data *pdata = pdev->dev.platform_data;
+ struct twl4030_codec_vibra_data *pdata = mfd_get_data(pdev);
struct vibra_info *info;
int ret;
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 92563a681d6..12abc50508e 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -107,7 +107,8 @@ static void amba_kmi_close(struct serio *io)
clk_disable(kmi->clk);
}
-static int __devinit amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit amba_kmi_probe(struct amba_device *dev,
+ const struct amba_id *id)
{
struct amba_kmi_port *kmi;
struct serio *io;
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index e672b44ee17..416def84d04 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -17,6 +17,7 @@
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/88pm860x.h>
#define LED_PWM_SHIFT (3)
@@ -118,7 +119,8 @@ static void pm860x_led_work(struct work_struct *work)
struct pm860x_led *led;
struct pm860x_chip *chip;
- int mask;
+ unsigned char buf[3];
+ int mask, ret;
led = container_of(work, struct pm860x_led, work);
chip = led->chip;
@@ -128,16 +130,27 @@ static void pm860x_led_work(struct work_struct *work)
pm860x_set_bits(led->i2c, __led_off(led->port),
LED_CURRENT_MASK, led->iset);
}
+ pm860x_set_bits(led->i2c, __blink_off(led->port),
+ LED_BLINK_MASK, LED_ON_CONTINUOUS);
mask = __blink_ctl_mask(led->port);
pm860x_set_bits(led->i2c, PM8606_WLED3B, mask, mask);
- } else if (led->brightness == 0) {
- pm860x_set_bits(led->i2c, __led_off(led->port),
- LED_CURRENT_MASK, 0);
- mask = __blink_ctl_mask(led->port);
- pm860x_set_bits(led->i2c, PM8606_WLED3B, mask, 0);
}
pm860x_set_bits(led->i2c, __led_off(led->port), LED_PWM_MASK,
led->brightness);
+
+ if (led->brightness == 0) {
+ pm860x_bulk_read(led->i2c, __led_off(led->port), 3, buf);
+ ret = buf[0] & LED_PWM_MASK;
+ ret |= buf[1] & LED_PWM_MASK;
+ ret |= buf[2] & LED_PWM_MASK;
+ if (ret == 0) {
+ /* unset current since no LED is lit */
+ pm860x_set_bits(led->i2c, __led_off(led->port),
+ LED_CURRENT_MASK, 0);
+ mask = __blink_ctl_mask(led->port);
+ pm860x_set_bits(led->i2c, PM8606_WLED3B, mask, 0);
+ }
+ }
led->current_brightness = led->brightness;
dev_dbg(chip->dev, "Update LED. (reg:%d, brightness:%d)\n",
__led_off(led->port), led->brightness);
@@ -153,31 +166,12 @@ static void pm860x_led_set(struct led_classdev *cdev,
schedule_work(&data->work);
}
-static int __check_device(struct pm860x_led_pdata *pdata, char *name)
-{
- struct pm860x_led_pdata *p = pdata;
- int ret = -EINVAL;
-
- while (p && p->id) {
- if ((p->id != PM8606_ID_LED) || (p->flags < 0))
- break;
-
- if (!strncmp(name, pm860x_led_name[p->flags],
- MFD_NAME_SIZE)) {
- ret = (int)p->flags;
- break;
- }
- p++;
- }
- return ret;
-}
-
static int pm860x_led_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm860x_platform_data *pm860x_pdata;
struct pm860x_led_pdata *pdata;
struct pm860x_led *data;
+ struct mfd_cell *cell;
struct resource *res;
int ret;
@@ -187,10 +181,11 @@ static int pm860x_led_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (pdev->dev.parent->platform_data) {
- pm860x_pdata = pdev->dev.parent->platform_data;
- pdata = pm860x_pdata->led;
- } else {
+ cell = pdev->dev.platform_data;
+ if (cell == NULL)
+ return -ENODEV;
+ pdata = cell->mfd_data;
+ if (pdata == NULL) {
dev_err(&pdev->dev, "No platform data!\n");
return -EINVAL;
}
@@ -198,12 +193,12 @@ static int pm860x_led_probe(struct platform_device *pdev)
data = kzalloc(sizeof(struct pm860x_led), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
- strncpy(data->name, res->name, MFD_NAME_SIZE);
+ strncpy(data->name, res->name, MFD_NAME_SIZE - 1);
dev_set_drvdata(&pdev->dev, data);
data->chip = chip;
data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
data->iset = pdata->iset;
- data->port = __check_device(pdata, data->name);
+ data->port = pdata->flags;
if (data->port < 0) {
dev_err(&pdev->dev, "check device failed\n");
kfree(data);
@@ -221,6 +216,7 @@ static int pm860x_led_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
goto out;
}
+ pm860x_led_set(&data->cdev, 0);
return 0;
out:
kfree(data);
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index f05bb08d0f0..06a5bb48470 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -22,6 +22,7 @@
#include <linux/leds.h>
#include <linux/workqueue.h>
#include <linux/mfd/mc13783.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
struct mc13783_led {
@@ -183,7 +184,7 @@ static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current)
static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
{
- struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev);
struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
int ret = 0;
int reg = 0;
@@ -264,7 +265,7 @@ out:
static int __devinit mc13783_led_probe(struct platform_device *pdev)
{
- struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev);
struct mc13783_led_platform_data *led_cur;
struct mc13783_led *led, *led_dat;
int ret, i;
@@ -351,7 +352,7 @@ err_free:
static int __devexit mc13783_led_remove(struct platform_device *pdev)
{
- struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev);
struct mc13783_led *led = platform_get_drvdata(pdev);
struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
int i;
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index a185610b376..1e3a8dd820a 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -21,6 +21,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -148,7 +149,7 @@ static const struct v4l2_file_operations timbradio_fops = {
static int __devinit timbradio_probe(struct platform_device *pdev)
{
- struct timb_radio_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_radio_platform_data *pdata = mfd_get_data(pdev);
struct timbradio *tr;
int err;
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 7ecc8e65766..4698eb00b59 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -2138,7 +2138,7 @@ static int wl1273_fm_radio_remove(struct platform_device *pdev)
static int __devinit wl1273_fm_radio_probe(struct platform_device *pdev)
{
- struct wl1273_core **core = pdev->dev.platform_data;
+ struct wl1273_core **core = mfd_get_data(pdev);
struct wl1273_device *radio;
struct v4l2_ctrl *ctrl;
int r = 0;
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
index fc611ebeb82..84d4c7c8343 100644
--- a/drivers/media/video/timblogiw.c
+++ b/drivers/media/video/timblogiw.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
+#include <linux/mfd/core.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
@@ -790,7 +791,7 @@ static int __devinit timblogiw_probe(struct platform_device *pdev)
{
int err;
struct timblogiw *lw = NULL;
- struct timb_video_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_video_platform_data *pdata = mfd_get_data(pdev);
if (!pdata) {
dev_err(&pdev->dev, "No platform data\n");
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 793300c554b..9c511c1604a 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -17,230 +17,138 @@
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/88pm860x.h>
+#include <linux/regulator/machine.h>
#define INT_STATUS_NUM 3
-char pm860x_backlight_name[][MFD_NAME_SIZE] = {
- "backlight-0",
- "backlight-1",
- "backlight-2",
+static struct resource bk_resources[] __initdata = {
+ {PM8606_BACKLIGHT1, PM8606_BACKLIGHT1, "backlight-0", IORESOURCE_IO,},
+ {PM8606_BACKLIGHT2, PM8606_BACKLIGHT2, "backlight-1", IORESOURCE_IO,},
+ {PM8606_BACKLIGHT3, PM8606_BACKLIGHT3, "backlight-2", IORESOURCE_IO,},
};
-EXPORT_SYMBOL(pm860x_backlight_name);
-
-char pm860x_led_name[][MFD_NAME_SIZE] = {
- "led0-red",
- "led0-green",
- "led0-blue",
- "led1-red",
- "led1-green",
- "led1-blue",
-};
-EXPORT_SYMBOL(pm860x_led_name);
-
-#define PM8606_BACKLIGHT_RESOURCE(_i, _x) \
-{ \
- .name = pm860x_backlight_name[_i], \
- .start = PM8606_##_x, \
- .end = PM8606_##_x, \
- .flags = IORESOURCE_IO, \
-}
-static struct resource backlight_resources[] = {
- PM8606_BACKLIGHT_RESOURCE(PM8606_BACKLIGHT1, WLED1A),
- PM8606_BACKLIGHT_RESOURCE(PM8606_BACKLIGHT2, WLED2A),
- PM8606_BACKLIGHT_RESOURCE(PM8606_BACKLIGHT3, WLED3A),
+static struct resource led_resources[] __initdata = {
+ {PM8606_LED1_RED, PM8606_LED1_RED, "led0-red", IORESOURCE_IO,},
+ {PM8606_LED1_GREEN, PM8606_LED1_GREEN, "led0-green", IORESOURCE_IO,},
+ {PM8606_LED1_BLUE, PM8606_LED1_BLUE, "led0-blue", IORESOURCE_IO,},
+ {PM8606_LED2_RED, PM8606_LED2_RED, "led1-red", IORESOURCE_IO,},
+ {PM8606_LED2_GREEN, PM8606_LED2_GREEN, "led1-green", IORESOURCE_IO,},
+ {PM8606_LED2_BLUE, PM8606_LED2_BLUE, "led1-blue", IORESOURCE_IO,},
};
-#define PM8606_BACKLIGHT_DEVS(_i) \
-{ \
- .name = "88pm860x-backlight", \
- .num_resources = 1, \
- .resources = &backlight_resources[_i], \
- .id = _i, \
-}
-
-static struct mfd_cell backlight_devs[] = {
- PM8606_BACKLIGHT_DEVS(PM8606_BACKLIGHT1),
- PM8606_BACKLIGHT_DEVS(PM8606_BACKLIGHT2),
- PM8606_BACKLIGHT_DEVS(PM8606_BACKLIGHT3),
+static struct resource regulator_resources[] __initdata = {
+ {PM8607_ID_BUCK1, PM8607_ID_BUCK1, "buck-1", IORESOURCE_IO,},
+ {PM8607_ID_BUCK2, PM8607_ID_BUCK2, "buck-2", IORESOURCE_IO,},
+ {PM8607_ID_BUCK3, PM8607_ID_BUCK3, "buck-3", IORESOURCE_IO,},
+ {PM8607_ID_LDO1, PM8607_ID_LDO1, "ldo-01", IORESOURCE_IO,},
+ {PM8607_ID_LDO2, PM8607_ID_LDO2, "ldo-02", IORESOURCE_IO,},
+ {PM8607_ID_LDO3, PM8607_ID_LDO3, "ldo-03", IORESOURCE_IO,},
+ {PM8607_ID_LDO4, PM8607_ID_LDO4, "ldo-04", IORESOURCE_IO,},
+ {PM8607_ID_LDO5, PM8607_ID_LDO5, "ldo-05", IORESOURCE_IO,},
+ {PM8607_ID_LDO6, PM8607_ID_LDO6, "ldo-06", IORESOURCE_IO,},
+ {PM8607_ID_LDO7, PM8607_ID_LDO7, "ldo-07", IORESOURCE_IO,},
+ {PM8607_ID_LDO8, PM8607_ID_LDO8, "ldo-08", IORESOURCE_IO,},
+ {PM8607_ID_LDO9, PM8607_ID_LDO9, "ldo-09", IORESOURCE_IO,},
+ {PM8607_ID_LDO10, PM8607_ID_LDO10, "ldo-10", IORESOURCE_IO,},
+ {PM8607_ID_LDO11, PM8607_ID_LDO11, "ldo-11", IORESOURCE_IO,},
+ {PM8607_ID_LDO12, PM8607_ID_LDO12, "ldo-12", IORESOURCE_IO,},
+ {PM8607_ID_LDO13, PM8607_ID_LDO13, "ldo-13", IORESOURCE_IO,},
+ {PM8607_ID_LDO14, PM8607_ID_LDO14, "ldo-14", IORESOURCE_IO,},
+ {PM8607_ID_LDO15, PM8607_ID_LDO15, "ldo-15", IORESOURCE_IO,},
};
-#define PM8606_LED_RESOURCE(_i, _x) \
-{ \
- .name = pm860x_led_name[_i], \
- .start = PM8606_##_x, \
- .end = PM8606_##_x, \
- .flags = IORESOURCE_IO, \
-}
-
-static struct resource led_resources[] = {
- PM8606_LED_RESOURCE(PM8606_LED1_RED, RGB1B),
- PM8606_LED_RESOURCE(PM8606_LED1_GREEN, RGB1C),
- PM8606_LED_RESOURCE(PM8606_LED1_BLUE, RGB1D),
- PM8606_LED_RESOURCE(PM8606_LED2_RED, RGB2B),
- PM8606_LED_RESOURCE(PM8606_LED2_GREEN, RGB2C),
- PM8606_LED_RESOURCE(PM8606_LED2_BLUE, RGB2D),
+static struct resource touch_resources[] __initdata = {
+ {PM8607_IRQ_PEN, PM8607_IRQ_PEN, "touch", IORESOURCE_IRQ,},
};
-#define PM8606_LED_DEVS(_i) \
-{ \
- .name = "88pm860x-led", \
- .num_resources = 1, \
- .resources = &led_resources[_i], \
- .id = _i, \
-}
-
-static struct mfd_cell led_devs[] = {
- PM8606_LED_DEVS(PM8606_LED1_RED),
- PM8606_LED_DEVS(PM8606_LED1_GREEN),
- PM8606_LED_DEVS(PM8606_LED1_BLUE),
- PM8606_LED_DEVS(PM8606_LED2_RED),
- PM8606_LED_DEVS(PM8606_LED2_GREEN),
- PM8606_LED_DEVS(PM8606_LED2_BLUE),
+static struct resource onkey_resources[] __initdata = {
+ {PM8607_IRQ_ONKEY, PM8607_IRQ_ONKEY, "onkey", IORESOURCE_IRQ,},
};
-static struct resource touch_resources[] = {
- {
- .start = PM8607_IRQ_PEN,
- .end = PM8607_IRQ_PEN,
- .flags = IORESOURCE_IRQ,
- },
+static struct resource codec_resources[] __initdata = {
+ /* Headset microphone insertion or removal */
+ {PM8607_IRQ_MICIN, PM8607_IRQ_MICIN, "micin", IORESOURCE_IRQ,},
+ /* Hook-switch press or release */
+ {PM8607_IRQ_HOOK, PM8607_IRQ_HOOK, "hook", IORESOURCE_IRQ,},
+ /* Headset insertion or removal */
+ {PM8607_IRQ_HEADSET, PM8607_IRQ_HEADSET, "headset", IORESOURCE_IRQ,},
+ /* Audio short */
+ {PM8607_IRQ_AUDIO_SHORT, PM8607_IRQ_AUDIO_SHORT, "audio-short", IORESOURCE_IRQ,},
};
-static struct mfd_cell touch_devs[] = {
- {
- .name = "88pm860x-touch",
- .num_resources = 1,
- .resources = &touch_resources[0],
- },
+static struct resource battery_resources[] __initdata = {
+ {PM8607_IRQ_CC, PM8607_IRQ_CC, "columb counter", IORESOURCE_IRQ,},
+ {PM8607_IRQ_BAT, PM8607_IRQ_BAT, "battery", IORESOURCE_IRQ,},
};
-#define PM8607_REG_RESOURCE(_start, _end) \
-{ \
- .start = PM8607_##_start, \
- .end = PM8607_##_end, \
- .flags = IORESOURCE_IO, \
-}
-
-static struct resource power_supply_resources[] = {
- {
- .name = "88pm860x-power",
- .start = PM8607_IRQ_CHG,
- .end = PM8607_IRQ_CHG,
- .flags = IORESOURCE_IRQ,
- },
+static struct resource charger_resources[] __initdata = {
+ {PM8607_IRQ_CHG, PM8607_IRQ_CHG, "charger detect", IORESOURCE_IRQ,},
+ {PM8607_IRQ_CHG_DONE, PM8607_IRQ_CHG_DONE, "charging done", IORESOURCE_IRQ,},
+ {PM8607_IRQ_CHG_FAULT, PM8607_IRQ_CHG_FAULT, "charging timeout", IORESOURCE_IRQ,},
+ {PM8607_IRQ_GPADC1, PM8607_IRQ_GPADC1, "battery temperature", IORESOURCE_IRQ,},
+ {PM8607_IRQ_VBAT, PM8607_IRQ_VBAT, "battery voltage", IORESOURCE_IRQ,},
+ {PM8607_IRQ_VCHG, PM8607_IRQ_VCHG, "vchg voltage", IORESOURCE_IRQ,},
};
-static struct mfd_cell power_devs[] = {
- {
- .name = "88pm860x-power",
- .num_resources = 1,
- .resources = &power_supply_resources[0],
- .id = -1,
- },
+static struct mfd_cell bk_devs[] __initdata = {
+ {"88pm860x-backlight", 0,},
+ {"88pm860x-backlight", 1,},
+ {"88pm860x-backlight", 2,},
};
-static struct resource onkey_resources[] = {
- {
- .name = "88pm860x-onkey",
- .start = PM8607_IRQ_ONKEY,
- .end = PM8607_IRQ_ONKEY,
- .flags = IORESOURCE_IRQ,
- },
+static struct mfd_cell led_devs[] __initdata = {
+ {"88pm860x-led", 0,},
+ {"88pm860x-led", 1,},
+ {"88pm860x-led", 2,},
+ {"88pm860x-led", 3,},
+ {"88pm860x-led", 4,},
+ {"88pm860x-led", 5,},
};
-static struct mfd_cell onkey_devs[] = {
- {
- .name = "88pm860x-onkey",
- .num_resources = 1,
- .resources = &onkey_resources[0],
- .id = -1,
- },
+static struct mfd_cell regulator_devs[] __initdata = {
+ {"88pm860x-regulator", 0,},
+ {"88pm860x-regulator", 1,},
+ {"88pm860x-regulator", 2,},
+ {"88pm860x-regulator", 3,},
+ {"88pm860x-regulator", 4,},
+ {"88pm860x-regulator", 5,},
+ {"88pm860x-regulator", 6,},
+ {"88pm860x-regulator", 7,},
+ {"88pm860x-regulator", 8,},
+ {"88pm860x-regulator", 9,},
+ {"88pm860x-regulator", 10,},
+ {"88pm860x-regulator", 11,},
+ {"88pm860x-regulator", 12,},
+ {"88pm860x-regulator", 13,},
+ {"88pm860x-regulator", 14,},
+ {"88pm860x-regulator", 15,},
+ {"88pm860x-regulator", 16,},
+ {"88pm860x-regulator", 17,},
};
-static struct resource codec_resources[] = {
- {
- /* Headset microphone insertion or removal */
- .name = "micin",
- .start = PM8607_IRQ_MICIN,
- .end = PM8607_IRQ_MICIN,
- .flags = IORESOURCE_IRQ,
- }, {
- /* Hook-switch press or release */
- .name = "hook",
- .start = PM8607_IRQ_HOOK,
- .end = PM8607_IRQ_HOOK,
- .flags = IORESOURCE_IRQ,
- }, {
- /* Headset insertion or removal */
- .name = "headset",
- .start = PM8607_IRQ_HEADSET,
- .end = PM8607_IRQ_HEADSET,
- .flags = IORESOURCE_IRQ,
- }, {
- /* Audio short */
- .name = "audio-short",
- .start = PM8607_IRQ_AUDIO_SHORT,
- .end = PM8607_IRQ_AUDIO_SHORT,
- .flags = IORESOURCE_IRQ,
- },
+static struct mfd_cell touch_devs[] __initdata = {
+ {"88pm860x-touch", -1,},
};
-static struct mfd_cell codec_devs[] = {
- {
- .name = "88pm860x-codec",
- .num_resources = ARRAY_SIZE(codec_resources),
- .resources = &codec_resources[0],
- .id = -1,
- },
+static struct mfd_cell onkey_devs[] __initdata = {
+ {"88pm860x-onkey", -1,},
};
-static struct resource regulator_resources[] = {
- PM8607_REG_RESOURCE(BUCK1, BUCK1),
- PM8607_REG_RESOURCE(BUCK2, BUCK2),
- PM8607_REG_RESOURCE(BUCK3, BUCK3),
- PM8607_REG_RESOURCE(LDO1, LDO1),
- PM8607_REG_RESOURCE(LDO2, LDO2),
- PM8607_REG_RESOURCE(LDO3, LDO3),
- PM8607_REG_RESOURCE(LDO4, LDO4),
- PM8607_REG_RESOURCE(LDO5, LDO5),
- PM8607_REG_RESOURCE(LDO6, LDO6),
- PM8607_REG_RESOURCE(LDO7, LDO7),
- PM8607_REG_RESOURCE(LDO8, LDO8),
- PM8607_REG_RESOURCE(LDO9, LDO9),
- PM8607_REG_RESOURCE(LDO10, LDO10),
- PM8607_REG_RESOURCE(LDO12, LDO12),
- PM8607_REG_RESOURCE(VIBRATOR_SET, VIBRATOR_SET),
- PM8607_REG_RESOURCE(LDO14, LDO14),
+static struct mfd_cell codec_devs[] __initdata = {
+ {"88pm860x-codec", -1,},
};
-#define PM8607_REG_DEVS(_id) \
-{ \
- .name = "88pm860x-regulator", \
- .num_resources = 1, \
- .resources = &regulator_resources[PM8607_ID_##_id], \
- .id = PM8607_ID_##_id, \
-}
-
-static struct mfd_cell regulator_devs[] = {
- PM8607_REG_DEVS(BUCK1),
- PM8607_REG_DEVS(BUCK2),
- PM8607_REG_DEVS(BUCK3),
- PM8607_REG_DEVS(LDO1),
- PM8607_REG_DEVS(LDO2),
- PM8607_REG_DEVS(LDO3),
- PM8607_REG_DEVS(LDO4),
- PM8607_REG_DEVS(LDO5),
- PM8607_REG_DEVS(LDO6),
- PM8607_REG_DEVS(LDO7),
- PM8607_REG_DEVS(LDO8),
- PM8607_REG_DEVS(LDO9),
- PM8607_REG_DEVS(LDO10),
- PM8607_REG_DEVS(LDO12),
- PM8607_REG_DEVS(LDO13),
- PM8607_REG_DEVS(LDO14),
+static struct mfd_cell power_devs[] = {
+ {"88pm860x-battery", -1,},
+ {"88pm860x-charger", -1,},
};
+static struct pm860x_backlight_pdata bk_pdata[ARRAY_SIZE(bk_devs)];
+static struct pm860x_led_pdata led_pdata[ARRAY_SIZE(led_devs)];
+static struct regulator_init_data regulator_pdata[ARRAY_SIZE(regulator_devs)];
+static struct pm860x_touch_pdata touch_pdata;
+static struct pm860x_power_pdata power_pdata;
+
struct pm860x_irq_data {
int reg;
int mask_reg;
@@ -595,37 +503,212 @@ static void device_irq_exit(struct pm860x_chip *chip)
free_irq(chip->core_irq, chip);
}
-static void __devinit device_8606_init(struct pm860x_chip *chip,
- struct i2c_client *i2c,
- struct pm860x_platform_data *pdata)
+static void __devinit device_bk_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
{
int ret;
+ int i, j, id;
+
+ if ((pdata == NULL) || (pdata->backlight == NULL))
+ return;
+
+ if (pdata->num_backlights > ARRAY_SIZE(bk_devs))
+ pdata->num_backlights = ARRAY_SIZE(bk_devs);
+
+ for (i = 0; i < pdata->num_backlights; i++) {
+ memcpy(&bk_pdata[i], &pdata->backlight[i],
+ sizeof(struct pm860x_backlight_pdata));
+ bk_devs[i].mfd_data = &bk_pdata[i];
+
+ for (j = 0; j < ARRAY_SIZE(bk_devs); j++) {
+ id = bk_resources[j].start;
+ if (bk_pdata[i].flags != id)
+ continue;
+
+ bk_devs[i].num_resources = 1;
+ bk_devs[i].resources = &bk_resources[j];
+ ret = mfd_add_devices(chip->dev, 0,
+ &bk_devs[i], 1,
+ &bk_resources[j], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add "
+ "backlight subdev\n");
+ return;
+ }
+ }
+ }
+}
- if (pdata && pdata->backlight) {
- ret = mfd_add_devices(chip->dev, 0, &backlight_devs[0],
- ARRAY_SIZE(backlight_devs),
- &backlight_resources[0], 0);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to add backlight "
- "subdev\n");
- goto out_dev;
+static void __devinit device_led_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
+{
+ int ret;
+ int i, j, id;
+
+ if ((pdata == NULL) || (pdata->led == NULL))
+ return;
+
+ if (pdata->num_leds > ARRAY_SIZE(led_devs))
+ pdata->num_leds = ARRAY_SIZE(led_devs);
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ memcpy(&led_pdata[i], &pdata->led[i],
+ sizeof(struct pm860x_led_pdata));
+ led_devs[i].mfd_data = &led_pdata[i];
+
+ for (j = 0; j < ARRAY_SIZE(led_devs); j++) {
+ id = led_resources[j].start;
+ if (led_pdata[i].flags != id)
+ continue;
+
+ led_devs[i].num_resources = 1;
+ led_devs[i].resources = &led_resources[j],
+ ret = mfd_add_devices(chip->dev, 0,
+ &led_devs[i], 1,
+ &led_resources[j], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add "
+ "led subdev\n");
+ return;
+ }
}
}
+}
- if (pdata && pdata->led) {
- ret = mfd_add_devices(chip->dev, 0, &led_devs[0],
- ARRAY_SIZE(led_devs),
- &led_resources[0], 0);
+static void __devinit device_regulator_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
+{
+ struct regulator_init_data *initdata;
+ int ret;
+ int i, j;
+
+ if ((pdata == NULL) || (pdata->regulator == NULL))
+ return;
+
+ if (pdata->num_regulators > ARRAY_SIZE(regulator_devs))
+ pdata->num_regulators = ARRAY_SIZE(regulator_devs);
+
+ for (i = 0, j = -1; i < pdata->num_regulators; i++) {
+ initdata = &pdata->regulator[i];
+ if (strstr(initdata->constraints.name, "BUCK")) {
+ sscanf(initdata->constraints.name, "BUCK%d", &j);
+ /* BUCK1 ~ BUCK3 */
+ if ((j < 1) || (j > 3)) {
+ dev_err(chip->dev, "Failed to add constraint "
+ "(%s)\n", initdata->constraints.name);
+ goto out;
+ }
+ j = (j - 1) + PM8607_ID_BUCK1;
+ }
+ if (strstr(initdata->constraints.name, "LDO")) {
+ sscanf(initdata->constraints.name, "LDO%d", &j);
+ /* LDO1 ~ LDO15 */
+ if ((j < 1) || (j > 15)) {
+ dev_err(chip->dev, "Failed to add constraint "
+ "(%s)\n", initdata->constraints.name);
+ goto out;
+ }
+ j = (j - 1) + PM8607_ID_LDO1;
+ }
+ if (j == -1) {
+ dev_err(chip->dev, "Failed to add constraint (%s)\n",
+ initdata->constraints.name);
+ goto out;
+ }
+ memcpy(&regulator_pdata[i], &pdata->regulator[i],
+ sizeof(struct regulator_init_data));
+ regulator_devs[i].mfd_data = &regulator_pdata[i];
+ regulator_devs[i].num_resources = 1;
+ regulator_devs[i].resources = &regulator_resources[j];
+
+ ret = mfd_add_devices(chip->dev, 0, &regulator_devs[i], 1,
+ &regulator_resources[j], 0);
if (ret < 0) {
- dev_err(chip->dev, "Failed to add led "
- "subdev\n");
- goto out_dev;
+ dev_err(chip->dev, "Failed to add regulator subdev\n");
+ goto out;
}
}
+out:
return;
-out_dev:
- mfd_remove_devices(chip->dev);
- device_irq_exit(chip);
+}
+
+static void __devinit device_touch_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
+{
+ int ret;
+
+ if ((pdata == NULL) || (pdata->touch == NULL))
+ return;
+
+ memcpy(&touch_pdata, pdata->touch, sizeof(struct pm860x_touch_pdata));
+ touch_devs[0].mfd_data = &touch_pdata;
+ touch_devs[0].num_resources = ARRAY_SIZE(touch_resources);
+ touch_devs[0].resources = &touch_resources[0];
+ ret = mfd_add_devices(chip->dev, 0, &touch_devs[0],
+ ARRAY_SIZE(touch_devs), &touch_resources[0],
+ chip->irq_base);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to add touch subdev\n");
+}
+
+static void __devinit device_power_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
+{
+ int ret;
+
+ if ((pdata == NULL) || (pdata->power == NULL))
+ return;
+
+ memcpy(&power_pdata, pdata->power, sizeof(struct pm860x_power_pdata));
+ power_devs[0].mfd_data = &power_pdata;
+ power_devs[0].num_resources = ARRAY_SIZE(battery_resources);
+ power_devs[0].resources = &battery_resources[0],
+ ret = mfd_add_devices(chip->dev, 0, &power_devs[0], 1,
+ &battery_resources[0], chip->irq_base);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to add battery subdev\n");
+
+ power_devs[1].mfd_data = &power_pdata;
+ power_devs[1].num_resources = ARRAY_SIZE(charger_resources);
+ power_devs[1].resources = &charger_resources[0],
+ ret = mfd_add_devices(chip->dev, 0, &power_devs[1], 1,
+ &charger_resources[0], chip->irq_base);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to add charger subdev\n");
+}
+
+static void __devinit device_onkey_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
+{
+ int ret;
+
+ onkey_devs[0].num_resources = ARRAY_SIZE(onkey_resources);
+ onkey_devs[0].resources = &onkey_resources[0];
+ ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0],
+ ARRAY_SIZE(onkey_devs), &onkey_resources[0],
+ chip->irq_base);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to add onkey subdev\n");
+}
+
+static void __devinit device_codec_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
+{
+ int ret;
+
+ codec_devs[0].num_resources = ARRAY_SIZE(codec_resources);
+ codec_devs[0].resources = &codec_resources[0];
+ ret = mfd_add_devices(chip->dev, 0, &codec_devs[0],
+ ARRAY_SIZE(codec_devs), &codec_resources[0], 0);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to add codec subdev\n");
}
static void __devinit device_8607_init(struct pm860x_chip *chip,
@@ -683,55 +766,11 @@ static void __devinit device_8607_init(struct pm860x_chip *chip,
if (ret < 0)
goto out;
- ret = mfd_add_devices(chip->dev, 0, &regulator_devs[0],
- ARRAY_SIZE(regulator_devs),
- &regulator_resources[0], 0);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to add regulator subdev\n");
- goto out_dev;
- }
-
- if (pdata && pdata->touch) {
- ret = mfd_add_devices(chip->dev, 0, &touch_devs[0],
- ARRAY_SIZE(touch_devs),
- &touch_resources[0], 0);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to add touch "
- "subdev\n");
- goto out_dev;
- }
- }
-
- if (pdata && pdata->power) {
- ret = mfd_add_devices(chip->dev, 0, &power_devs[0],
- ARRAY_SIZE(power_devs),
- &power_supply_resources[0], 0);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to add power supply "
- "subdev\n");
- goto out_dev;
- }
- }
-
- ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0],
- ARRAY_SIZE(onkey_devs),
- &onkey_resources[0], 0);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to add onkey subdev\n");
- goto out_dev;
- }
-
- ret = mfd_add_devices(chip->dev, 0, &codec_devs[0],
- ARRAY_SIZE(codec_devs),
- &codec_resources[0], 0);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to add codec subdev\n");
- goto out_dev;
- }
- return;
-out_dev:
- mfd_remove_devices(chip->dev);
- device_irq_exit(chip);
+ device_regulator_init(chip, i2c, pdata);
+ device_onkey_init(chip, i2c, pdata);
+ device_touch_init(chip, i2c, pdata);
+ device_power_init(chip, i2c, pdata);
+ device_codec_init(chip, i2c, pdata);
out:
return;
}
@@ -743,7 +782,8 @@ int __devinit pm860x_device_init(struct pm860x_chip *chip,
switch (chip->id) {
case CHIP_PM8606:
- device_8606_init(chip, chip->client, pdata);
+ device_bk_init(chip, chip->client, pdata);
+ device_led_init(chip, chip->client, pdata);
break;
case CHIP_PM8607:
device_8607_init(chip, chip->client, pdata);
@@ -753,7 +793,8 @@ int __devinit pm860x_device_init(struct pm860x_chip *chip,
if (chip->companion) {
switch (chip->id) {
case CHIP_PM8607:
- device_8606_init(chip, chip->companion, pdata);
+ device_bk_init(chip, chip->companion, pdata);
+ device_led_init(chip, chip->companion, pdata);
break;
case CHIP_PM8606:
device_8607_init(chip, chip->companion, pdata);
diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
index bc02e6b2160..e017dc88622 100644
--- a/drivers/mfd/88pm860x-i2c.c
+++ b/drivers/mfd/88pm860x-i2c.c
@@ -126,6 +126,109 @@ out:
}
EXPORT_SYMBOL(pm860x_set_bits);
+int pm860x_page_reg_read(struct i2c_client *i2c, int reg)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ unsigned char zero = 0;
+ unsigned char data;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ pm860x_write_device(i2c, 0xFA, 0, &zero);
+ pm860x_write_device(i2c, 0xFB, 0, &zero);
+ pm860x_write_device(i2c, 0xFF, 0, &zero);
+ ret = pm860x_read_device(i2c, reg, 1, &data);
+ if (ret >= 0)
+ ret = (int)data;
+ pm860x_write_device(i2c, 0xFE, 0, &zero);
+ pm860x_write_device(i2c, 0xFC, 0, &zero);
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_page_reg_read);
+
+int pm860x_page_reg_write(struct i2c_client *i2c, int reg,
+ unsigned char data)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ unsigned char zero = 0;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ pm860x_write_device(i2c, 0xFA, 0, &zero);
+ pm860x_write_device(i2c, 0xFB, 0, &zero);
+ pm860x_write_device(i2c, 0xFF, 0, &zero);
+ ret = pm860x_write_device(i2c, reg, 1, &data);
+ pm860x_write_device(i2c, 0xFE, 0, &zero);
+ pm860x_write_device(i2c, 0xFC, 0, &zero);
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_page_reg_write);
+
+int pm860x_page_bulk_read(struct i2c_client *i2c, int reg,
+ int count, unsigned char *buf)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ unsigned char zero = 0;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ pm860x_write_device(i2c, 0xFA, 0, &zero);
+ pm860x_write_device(i2c, 0xFB, 0, &zero);
+ pm860x_write_device(i2c, 0xFF, 0, &zero);
+ ret = pm860x_read_device(i2c, reg, count, buf);
+ pm860x_write_device(i2c, 0xFE, 0, &zero);
+ pm860x_write_device(i2c, 0xFC, 0, &zero);
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_page_bulk_read);
+
+int pm860x_page_bulk_write(struct i2c_client *i2c, int reg,
+ int count, unsigned char *buf)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ unsigned char zero = 0;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ pm860x_write_device(i2c, 0xFA, 0, &zero);
+ pm860x_write_device(i2c, 0xFB, 0, &zero);
+ pm860x_write_device(i2c, 0xFF, 0, &zero);
+ ret = pm860x_write_device(i2c, reg, count, buf);
+ pm860x_write_device(i2c, 0xFE, 0, &zero);
+ pm860x_write_device(i2c, 0xFC, 0, &zero);
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_page_bulk_write);
+
+int pm860x_page_set_bits(struct i2c_client *i2c, int reg,
+ unsigned char mask, unsigned char data)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ unsigned char zero = 0;
+ unsigned char value;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ pm860x_write_device(i2c, 0xFA, 0, &zero);
+ pm860x_write_device(i2c, 0xFB, 0, &zero);
+ pm860x_write_device(i2c, 0xFF, 0, &zero);
+ ret = pm860x_read_device(i2c, reg, 1, &value);
+ if (ret < 0)
+ goto out;
+ value &= ~mask;
+ value |= data;
+ ret = pm860x_write_device(i2c, reg, 1, &value);
+out:
+ pm860x_write_device(i2c, 0xFE, 0, &zero);
+ pm860x_write_device(i2c, 0xFC, 0, &zero);
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_page_set_bits);
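All of the page accessors above follow the same pattern: take chip->io_lock, write the 0xFA/0xFB/0xFF sequence to switch the 88PM860x to its second register page, perform the access, then write 0xFE/0xFC to switch back. A minimal, purely illustrative caller (the register offset 0x20 and the bit mask are invented for the example):

static int example_clear_page_bit(struct i2c_client *i2c)
{
	int val = pm860x_page_reg_read(i2c, 0x20);	/* read from the page */

	if (val < 0)
		return val;				/* I2C error */
	/* clear bit 0 in the paged register, leaving the other bits alone */
	return pm860x_page_set_bits(i2c, 0x20, 0x01, 0x00);
}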
static const struct i2c_device_id pm860x_id_table[] = {
{ "88PM860x", 0 },
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index fd018366d67..8d7d098f7a0 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -81,6 +81,17 @@ config MFD_DM355EVM_MSP
boards. MSP430 firmware manages resets and power sequencing,
inputs from buttons and the IR remote, LEDs, an RTC, and more.
+config MFD_TI_SSP
+ tristate "TI Sequencer Serial Port support"
+ depends on ARCH_DAVINCI_TNETV107X
+ select MFD_CORE
+ ---help---
+ Say Y here if you want support for the Sequencer Serial Port
+ in a Texas Instruments TNETV107X SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ti-ssp.
+
config HTC_EGPIO
bool "HTC EGPIO support"
depends on GENERIC_HARDIRQS && GPIOLIB && ARM
@@ -118,6 +129,17 @@ config UCB1400_CORE
To compile this driver as a module, choose M here: the
module will be called ucb1400_core.
+config TPS6105X
+ tristate "TPS61050/61052 Boost Converters"
+ depends on I2C
+ select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
+ help
+ This option enables a driver for the TPS61050/TPS61052
+ high-power "white LED driver". This boost converter is
+ sometimes used for purposes other than white LEDs, and
+ also contains a GPIO pin.
+
config TPS65010
tristate "TPS6501x Power Management chips"
depends on I2C && GPIOLIB
@@ -167,6 +189,16 @@ config TWL4030_CORE
high speed USB OTG transceiver, an audio codec (on most
versions) and many other features.
+config TWL4030_MADC
+ tristate "Texas Instruments TWL4030 MADC"
+ depends on TWL4030_CORE
+ help
+ This driver provides support for the Triton TWL4030-MADC. The
+ driver supports both RT and SW conversion methods.
+
+ This driver can be built as a module. If so, the module will be
+ called twl4030-madc.
+
config TWL4030_POWER
bool "Support power resources on TWL4030 family chips"
depends on TWL4030_CORE && ARM
@@ -293,6 +325,18 @@ config MFD_MAX8925
accessing the device, additional drivers must be enabled in order
to use the functionality of the device.
+config MFD_MAX8997
+ bool "Maxim Semiconductor MAX8997/8966 PMIC Support"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ Say yes here to add support for the Maxim Semiconductor MAX8997/8966.
+ This is a Power Management IC with RTC, Flash, Fuel Gauge, Haptic and
+ MUIC controls on chip.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
config MFD_MAX8998
bool "Maxim Semiconductor MAX8998/National LP3974 PMIC Support"
depends on I2C=y && GENERIC_HARDIRQS
@@ -523,6 +567,13 @@ config AB8500_DEBUG
Select this option if you want debug information using the debug
filesystem, debugfs.
+config AB8500_GPADC
+ bool "AB8500 GPADC driver"
+ depends on AB8500_CORE && REGULATOR_AB8500
+ default y
+ help
+ AB8500 GPADC driver, used to convert accessory and battery/AC/USB voltages.
+
config AB3550_CORE
bool "ST-Ericsson AB3550 Mixed Signal Circuit core functions"
select MFD_CORE
@@ -624,6 +675,15 @@ config MFD_WL1273_CORE
driver connects the radio-wl1273 V4L2 module and the wl1273
audio codec.
+config MFD_OMAP_USB_HOST
+ bool "Support OMAP USBHS core driver"
+ depends on USB_EHCI_HCD_OMAP || USB_OHCI_HCD_OMAP3
+ default y
+ help
+ This is the core driver for the OMAP EHCI and OHCI drivers.
+ This MFD driver performs the setup required by the
+ OMAP USB Host drivers.
+
endif # MFD_SUPPORT
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index a54e2c7c6a1..47f5709f382 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
+obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o
obj-$(CONFIG_MFD_STMPE) += stmpe.o
obj-$(CONFIG_MFD_TC3589X) += tc3589x.o
@@ -32,11 +33,13 @@ obj-$(CONFIG_MFD_WM8350) += wm8350.o
obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o
+obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_TPS6507X) += tps6507x.o
obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
+obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o
obj-$(CONFIG_TWL6030_PWM) += twl6030-pwm.o
@@ -60,6 +63,7 @@ obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o
obj-$(CONFIG_PMIC_DA903X) += da903x.o
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
+obj-$(CONFIG_MFD_MAX8997) += max8997.o
obj-$(CONFIG_MFD_MAX8998) += max8998.o max8998-irq.o
pcf50633-objs := pcf50633-core.o pcf50633-irq.o
@@ -70,9 +74,10 @@ obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
obj-$(CONFIG_AB3550_CORE) += ab3550-core.o
-obj-$(CONFIG_AB8500_CORE) += ab8500-core.o
+obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
+obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
obj-$(CONFIG_LPC_SCH) += lpc_sch.o
@@ -83,3 +88,4 @@ obj-$(CONFIG_MFD_TPS6586X) += tps6586x.o
obj-$(CONFIG_MFD_VX855) += vx855.o
obj-$(CONFIG_MFD_WL1273_CORE) += wl1273-core.o
obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
+obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 1707d224232..a751927047a 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -949,10 +949,8 @@ static int __devinit ab3100_probe(struct i2c_client *client,
goto exit_no_ops;
/* Set up and register the platform devices. */
- for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++) {
- ab3100_devs[i].platform_data = ab3100_plf_data;
- ab3100_devs[i].data_size = sizeof(struct ab3100_platform_data);
- }
+ for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++)
+ ab3100_devs[i].mfd_data = ab3100_plf_data;
err = mfd_add_devices(&client->dev, 0, ab3100_devs,
ARRAY_SIZE(ab3100_devs), NULL, 0);
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index 681984df1c2..c12d0428522 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -1320,10 +1320,8 @@ static int __init ab3550_probe(struct i2c_client *client,
goto exit_no_ops;
/* Set up and register the platform devices. */
- for (i = 0; i < AB3550_NUM_DEVICES; i++) {
- ab3550_devs[i].platform_data = ab3550_plf_data->dev_data[i];
- ab3550_devs[i].data_size = ab3550_plf_data->dev_data_sz[i];
- }
+ for (i = 0; i < AB3550_NUM_DEVICES; i++)
+ ab3550_devs[i].mfd_data = ab3550_plf_data->dev_data[i];
err = mfd_add_devices(&client->dev, 0, ab3550_devs,
ARRAY_SIZE(ab3550_devs), NULL,
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index b6887014d68..3b43b3d2499 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -4,7 +4,7 @@
* License Terms: GNU General Public License v2
* Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
* Author: Rabin Vincent <rabin.vincent@stericsson.com>
- * Changes: Mattias Wallin <mattias.wallin@stericsson.com>
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com>
*/
#include <linux/kernel.h>
@@ -90,6 +90,7 @@
#define AB8500_IT_MASK24_REG 0x57
#define AB8500_REV_REG 0x80
+#define AB8500_SWITCH_OFF_STATUS 0x00
/*
* Map interrupt numbers to the LATCH and MASK register offsets, Interrupt
@@ -361,6 +362,15 @@ static void ab8500_irq_remove(struct ab8500 *ab8500)
}
}
+static struct resource ab8500_gpio_resources[] = {
+ {
+ .name = "GPIO_INT6",
+ .start = AB8500_INT_GPIO6R,
+ .end = AB8500_INT_GPIO41F,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
static struct resource ab8500_gpadc_resources[] = {
{
.name = "HW_CONV_END",
@@ -595,6 +605,11 @@ static struct mfd_cell ab8500_devs[] = {
.name = "ab8500-regulator",
},
{
+ .name = "ab8500-gpio",
+ .num_resources = ARRAY_SIZE(ab8500_gpio_resources),
+ .resources = ab8500_gpio_resources,
+ },
+ {
.name = "ab8500-gpadc",
.num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
.resources = ab8500_gpadc_resources,
@@ -652,10 +667,38 @@ static ssize_t show_chip_id(struct device *dev,
return sprintf(buf, "%#x\n", ab8500 ? ab8500->chip_id : -EINVAL);
}
+/*
+ * ab8500 has switched off due to (SWITCH_OFF_STATUS):
+ * 0x01 Swoff bit programming
+ * 0x02 Thermal protection activation
+ * 0x04 Vbat lower than BattOk falling threshold
+ * 0x08 Watchdog expired
+ * 0x10 Non presence of 32kHz clock
+ * 0x20 Battery level lower than power on reset threshold
+ * 0x40 Power on key 1 pressed longer than 10 seconds
+ * 0x80 DB8500 thermal shutdown
+ */
+static ssize_t show_switch_off_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ u8 value;
+ struct ab8500 *ab8500;
+
+ ab8500 = dev_get_drvdata(dev);
+ ret = get_register_interruptible(ab8500, AB8500_RTC,
+ AB8500_SWITCH_OFF_STATUS, &value);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%#x\n", value);
+}
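The sysfs attribute only exports the raw status byte; a consumer is expected to decode it against the bit list in the comment above. A hypothetical decode helper (not part of the driver) for two of the bits:

static void example_decode_switch_off(struct device *dev, u8 status)
{
	if (status & 0x04)
		dev_info(dev, "last switch-off: Vbat below BattOk threshold\n");
	if (status & 0x08)
		dev_info(dev, "last switch-off: watchdog expired\n");
	/* bits 0x01, 0x02, 0x10, 0x20, 0x40 and 0x80 decode as listed above */
}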
+
static DEVICE_ATTR(chip_id, S_IRUGO, show_chip_id, NULL);
+static DEVICE_ATTR(switch_off_status, S_IRUGO, show_switch_off_status, NULL);
static struct attribute *ab8500_sysfs_entries[] = {
&dev_attr_chip_id.attr,
+ &dev_attr_switch_off_status.attr,
NULL,
};
@@ -686,9 +729,10 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
* 0x10 - Cut 1.0
* 0x11 - Cut 1.1
* 0x20 - Cut 2.0
+ * 0x30 - Cut 3.0
*/
- if (value == 0x0 || value == 0x10 || value == 0x11 || value == 0x20) {
- ab8500->revision = value;
+ if (value == 0x0 || value == 0x10 || value == 0x11 || value == 0x20 ||
+ value == 0x30) {
dev_info(ab8500->dev, "detected chip, revision: %#x\n", value);
} else {
dev_err(ab8500->dev, "unknown chip, revision: %#x\n", value);
@@ -696,6 +740,24 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
}
ab8500->chip_id = value;
+ /*
+ * ab8500 has switched off due to (SWITCH_OFF_STATUS):
+ * 0x01 Swoff bit programming
+ * 0x02 Thermal protection activation
+ * 0x04 Vbat lower than BattOk falling threshold
+ * 0x08 Watchdog expired
+ * 0x10 Non presence of 32kHz clock
+ * 0x20 Battery level lower than power on reset threshold
+ * 0x40 Power on key 1 pressed longer than 10 seconds
+ * 0x80 DB8500 thermal shutdown
+ */
+
+ ret = get_register_interruptible(ab8500, AB8500_RTC,
+ AB8500_SWITCH_OFF_STATUS, &value);
+ if (ret < 0)
+ return ret;
+ dev_info(ab8500->dev, "switch off status: %#x", value);
+
if (plat && plat->init)
plat->init(ab8500);
@@ -764,6 +826,6 @@ int __devexit ab8500_exit(struct ab8500 *ab8500)
return 0;
}
-MODULE_AUTHOR("Srinidhi Kasagar, Rabin Vincent");
+MODULE_AUTHOR("Mattias Wallin, Srinidhi Kasagar, Rabin Vincent");
MODULE_DESCRIPTION("AB8500 MFD core");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
new file mode 100644
index 00000000000..bc93b2e8230
--- /dev/null
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -0,0 +1,614 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ * Author: Daniel Willerud <daniel.willerud@stericsson.com>
+ * Author: Johan Palsson <johan.palsson@stericsson.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500/gpadc.h>
+
+/*
+ * GPADC register offsets
+ * Bank : 0x0A
+ */
+#define AB8500_GPADC_CTRL1_REG 0x00
+#define AB8500_GPADC_CTRL2_REG 0x01
+#define AB8500_GPADC_CTRL3_REG 0x02
+#define AB8500_GPADC_AUTO_TIMER_REG 0x03
+#define AB8500_GPADC_STAT_REG 0x04
+#define AB8500_GPADC_MANDATAL_REG 0x05
+#define AB8500_GPADC_MANDATAH_REG 0x06
+#define AB8500_GPADC_AUTODATAL_REG 0x07
+#define AB8500_GPADC_AUTODATAH_REG 0x08
+#define AB8500_GPADC_MUX_CTRL_REG 0x09
+
+/*
+ * OTP register offsets
+ * Bank : 0x15
+ */
+#define AB8500_GPADC_CAL_1 0x0F
+#define AB8500_GPADC_CAL_2 0x10
+#define AB8500_GPADC_CAL_3 0x11
+#define AB8500_GPADC_CAL_4 0x12
+#define AB8500_GPADC_CAL_5 0x13
+#define AB8500_GPADC_CAL_6 0x14
+#define AB8500_GPADC_CAL_7 0x15
+
+/* gpadc constants */
+#define EN_VINTCORE12 0x04
+#define EN_VTVOUT 0x02
+#define EN_GPADC 0x01
+#define DIS_GPADC 0x00
+#define SW_AVG_16 0x60
+#define ADC_SW_CONV 0x04
+#define EN_ICHAR 0x80
+#define EN_BUF 0x40
+#define DIS_ZERO 0x00
+#define GPADC_BUSY 0x01
+
+/* GPADC constants from AB8500 spec, UM0836 */
+#define ADC_RESOLUTION 1024
+#define ADC_CH_BTEMP_MIN 0
+#define ADC_CH_BTEMP_MAX 1350
+#define ADC_CH_DIETEMP_MIN 0
+#define ADC_CH_DIETEMP_MAX 1350
+#define ADC_CH_CHG_V_MIN 0
+#define ADC_CH_CHG_V_MAX 20030
+#define ADC_CH_ACCDET2_MIN 0
+#define ADC_CH_ACCDET2_MAX 2500
+#define ADC_CH_VBAT_MIN 2300
+#define ADC_CH_VBAT_MAX 4800
+#define ADC_CH_CHG_I_MIN 0
+#define ADC_CH_CHG_I_MAX 1500
+#define ADC_CH_BKBAT_MIN 0
+#define ADC_CH_BKBAT_MAX 3200
+
+/* This is used to not lose precision when dividing to get gain and offset */
+#define CALIB_SCALE 1000
+
+enum cal_channels {
+ ADC_INPUT_VMAIN = 0,
+ ADC_INPUT_BTEMP,
+ ADC_INPUT_VBAT,
+ NBR_CAL_INPUTS,
+};
+
+/**
+ * struct adc_cal_data - Table for storing gain and offset for the calibrated
+ * ADC channels
+ * @gain: Gain of the ADC channel
+ * @offset: Offset of the ADC channel
+ */
+struct adc_cal_data {
+ u64 gain;
+ u64 offset;
+};
+
+/**
+ * struct ab8500_gpadc - AB8500 GPADC device information
+ * @dev: pointer to the struct device
+ * @node: a list of AB8500 GPADCs, hence prepared for
+ * reentrance
+ * @ab8500_gpadc_complete: pointer to the struct completion, to indicate
+ * the completion of gpadc conversion
+ * @ab8500_gpadc_lock: structure of type mutex
+ * @regu: pointer to the struct regulator
+ * @irq: interrupt number that is used by gpadc
+ * @cal_data: array of ADC calibration data structs
+ */
+struct ab8500_gpadc {
+ struct device *dev;
+ struct list_head node;
+ struct completion ab8500_gpadc_complete;
+ struct mutex ab8500_gpadc_lock;
+ struct regulator *regu;
+ int irq;
+ struct adc_cal_data cal_data[NBR_CAL_INPUTS];
+};
+
+static LIST_HEAD(ab8500_gpadc_list);
+
+/**
+ * ab8500_gpadc_get() - returns a reference to the AB8500 GPADC whose device
+ * name matches the given name (or an ERR_PTR if none is registered)
+ */
+struct ab8500_gpadc *ab8500_gpadc_get(char *name)
+{
+ struct ab8500_gpadc *gpadc;
+
+ list_for_each_entry(gpadc, &ab8500_gpadc_list, node) {
+ if (!strcmp(name, dev_name(gpadc->dev)))
+ return gpadc;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(ab8500_gpadc_get);
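A minimal consumer sketch: look up the GPADC by device name, then convert the main battery channel. The name "ab8500-gpadc.0" is an assumption here; the actual string depends on how the platform device was registered.

static int example_read_vbat_mv(void)
{
	struct ab8500_gpadc *gpadc = ab8500_gpadc_get("ab8500-gpadc.0");

	if (IS_ERR(gpadc))
		return PTR_ERR(gpadc);
	/* returns the converted value in mV, or a negative error code */
	return ab8500_gpadc_convert(gpadc, MAIN_BAT_V);
}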
+
+static int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc, u8 input,
+ int ad_value)
+{
+ int res;
+
+ switch (input) {
+ case MAIN_CHARGER_V:
+ /* For some reason we don't have calibrated data */
+ if (!gpadc->cal_data[ADC_INPUT_VMAIN].gain) {
+ res = ADC_CH_CHG_V_MIN + (ADC_CH_CHG_V_MAX -
+ ADC_CH_CHG_V_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use the calibrated data */
+ res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_VMAIN].gain +
+ gpadc->cal_data[ADC_INPUT_VMAIN].offset) / CALIB_SCALE;
+ break;
+
+ case BAT_CTRL:
+ case BTEMP_BALL:
+ case ACC_DETECT1:
+ case ADC_AUX1:
+ case ADC_AUX2:
+ /* For some reason we don't have calibrated data */
+ if (!gpadc->cal_data[ADC_INPUT_BTEMP].gain) {
+ res = ADC_CH_BTEMP_MIN + (ADC_CH_BTEMP_MAX -
+ ADC_CH_BTEMP_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use the calibrated data */
+ res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_BTEMP].gain +
+ gpadc->cal_data[ADC_INPUT_BTEMP].offset) / CALIB_SCALE;
+ break;
+
+ case MAIN_BAT_V:
+ /* For some reason we don't have calibrated data */
+ if (!gpadc->cal_data[ADC_INPUT_VBAT].gain) {
+ res = ADC_CH_VBAT_MIN + (ADC_CH_VBAT_MAX -
+ ADC_CH_VBAT_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use the calibrated data */
+ res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_VBAT].gain +
+ gpadc->cal_data[ADC_INPUT_VBAT].offset) / CALIB_SCALE;
+ break;
+
+ case DIE_TEMP:
+ res = ADC_CH_DIETEMP_MIN +
+ (ADC_CH_DIETEMP_MAX - ADC_CH_DIETEMP_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+
+ case ACC_DETECT2:
+ res = ADC_CH_ACCDET2_MIN +
+ (ADC_CH_ACCDET2_MAX - ADC_CH_ACCDET2_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+
+ case VBUS_V:
+ res = ADC_CH_CHG_V_MIN +
+ (ADC_CH_CHG_V_MAX - ADC_CH_CHG_V_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+
+ case MAIN_CHARGER_C:
+ case USB_CHARGER_C:
+ res = ADC_CH_CHG_I_MIN +
+ (ADC_CH_CHG_I_MAX - ADC_CH_CHG_I_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+
+ case BK_BAT_V:
+ res = ADC_CH_BKBAT_MIN +
+ (ADC_CH_BKBAT_MAX - ADC_CH_BKBAT_MIN) * ad_value /
+ ADC_RESOLUTION;
+ break;
+
+ default:
+ dev_err(gpadc->dev,
+ "unknown channel, not possible to convert\n");
+ res = -EINVAL;
+ break;
+
+ }
+ return res;
+}
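As a worked example of the uncalibrated linear mapping above: a MAIN_BAT_V reading of ad_value = 512 converts to 2300 + (4800 - 2300) * 512 / 1024 = 3550 mV, while the calibrated path instead returns (512 * gain + offset) / 1000 using the per-chip gain and offset read from OTP below.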
+
+/**
+ * ab8500_gpadc_convert() - gpadc conversion
+ * @input: analog input to be converted to digital data
+ *
+ * This function converts the selected analog i/p to digital
+ * data.
+ */
+int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
+{
+ int ret;
+ u16 data = 0;
+ int looplimit = 0;
+ u8 val, low_data, high_data;
+
+ if (!gpadc)
+ return -ENODEV;
+
+ mutex_lock(&gpadc->ab8500_gpadc_lock);
+ /* Enable VTVout LDO this is required for GPADC */
+ regulator_enable(gpadc->regu);
+
+ /* Check if ADC is not busy, lock and proceed */
+ do {
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_STAT_REG, &val);
+ if (ret < 0)
+ goto out;
+ if (!(val & GPADC_BUSY))
+ break;
+ msleep(10);
+ } while (++looplimit < 10);
+ if (looplimit >= 10 && (val & GPADC_BUSY)) {
+ dev_err(gpadc->dev, "gpadc_conversion: GPADC busy");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Enable GPADC */
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL1_REG, EN_GPADC, EN_GPADC);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc_conversion: enable gpadc failed\n");
+ goto out;
+ }
+ /* Select the input source and set average samples to 16 */
+ ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL2_REG, (input | SW_AVG_16));
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: set avg samples failed\n");
+ goto out;
+ }
+ /*
+ * Enable ADC, buffering, select rising edge, and enable the ADC
+ * charging-current sense path if it is needed
+ */
+ switch (input) {
+ case MAIN_CHARGER_C:
+ case USB_CHARGER_C:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL1_REG,
+ EN_BUF | EN_ICHAR,
+ EN_BUF | EN_ICHAR);
+ break;
+ default:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL1_REG, EN_BUF, EN_BUF);
+ break;
+ }
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: select falling edge failed\n");
+ goto out;
+ }
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL1_REG, ADC_SW_CONV, ADC_SW_CONV);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: start s/w conversion failed\n");
+ goto out;
+ }
+ /* wait for completion of conversion */
+ if (!wait_for_completion_timeout(&gpadc->ab8500_gpadc_complete, 2*HZ)) {
+ dev_err(gpadc->dev,
+ "timeout: didnt recieve GPADC conversion interrupt\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Read the converted RAW data */
+ ret = abx500_get_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_MANDATAL_REG, &low_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc_conversion: read low data failed\n");
+ goto out;
+ }
+
+ ret = abx500_get_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_MANDATAH_REG, &high_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: read high data failed\n");
+ goto out;
+ }
+
+ data = (high_data << 8) | low_data;
+ /* Disable GPADC */
+ ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, DIS_GPADC);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc_conversion: disable gpadc failed\n");
+ goto out;
+ }
+ /* Disable VTVout LDO this is required for GPADC */
+ regulator_disable(gpadc->regu);
+ mutex_unlock(&gpadc->ab8500_gpadc_lock);
+ ret = ab8500_gpadc_ad_to_voltage(gpadc, input, data);
+ return ret;
+
+out:
+ /*
+ * It has been shown necessary to turn off the GPADC if an error occurs,
+ * otherwise we might have problems waiting for the busy bit in the
+ * GPADC status register to go low. On V1.1 the wait_for_completion
+ * seems to time out while waiting for an interrupt; not seen on V2.0.
+ */
+ (void) abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, DIS_GPADC);
+ regulator_disable(gpadc->regu);
+ mutex_unlock(&gpadc->ab8500_gpadc_lock);
+ dev_err(gpadc->dev,
+ "gpadc_conversion: Failed to AD convert channel %d\n", input);
+ return ret;
+}
+EXPORT_SYMBOL(ab8500_gpadc_convert);
+
+/**
+ * ab8500_bm_gpswadcconvend_handler() - isr for s/w gpadc conversion completion
+ * @irq: irq number
+ * @data: pointer to the data passed during request irq
+ *
+ * This is the interrupt service routine for s/w gpadc conversion completion.
+ * It signals that the gpadc conversion has completed and that the converted
+ * raw value can be read from the registers.
+ * Returns IRQ status (IRQ_HANDLED).
+ */
+static irqreturn_t ab8500_bm_gpswadcconvend_handler(int irq, void *_gpadc)
+{
+ struct ab8500_gpadc *gpadc = _gpadc;
+
+ complete(&gpadc->ab8500_gpadc_complete);
+
+ return IRQ_HANDLED;
+}
+
+static int otp_cal_regs[] = {
+ AB8500_GPADC_CAL_1,
+ AB8500_GPADC_CAL_2,
+ AB8500_GPADC_CAL_3,
+ AB8500_GPADC_CAL_4,
+ AB8500_GPADC_CAL_5,
+ AB8500_GPADC_CAL_6,
+ AB8500_GPADC_CAL_7,
+};
+
+static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
+{
+ int i;
+ int ret[ARRAY_SIZE(otp_cal_regs)];
+ u8 gpadc_cal[ARRAY_SIZE(otp_cal_regs)];
+
+ int vmain_high, vmain_low;
+ int btemp_high, btemp_low;
+ int vbat_high, vbat_low;
+
+ /* First we read all OTP registers and store the error code */
+ for (i = 0; i < ARRAY_SIZE(otp_cal_regs); i++) {
+ ret[i] = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_OTP_EMUL, otp_cal_regs[i], &gpadc_cal[i]);
+ if (ret[i] < 0)
+ dev_err(gpadc->dev, "%s: read otp reg 0x%02x failed\n",
+ __func__, otp_cal_regs[i]);
+ }
+
+ /*
+ * The ADC calibration data is stored in OTP registers.
+ * The layout of the calibration data is outlined below and a more
+ * detailed description can be found in UM0836
+ *
+ * vm_h/l = vmain_high/low
+ * bt_h/l = btemp_high/low
+ * vb_h/l = vbat_high/low
+ *
+ * Data bits:
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | | vm_h9 | vm_h8
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ *
+ *
+ * Ideal output ADC codes corresponding to injected input voltages
+ * during manufacturing are:
+ *
+ * vmain_high: Vin = 19500mV / ADC ideal code = 997
+ * vmain_low: Vin = 315mV / ADC ideal code = 16
+ * btemp_high: Vin = 1300mV / ADC ideal code = 985
+ * btemp_low: Vin = 21mV / ADC ideal code = 16
+ * vbat_high: Vin = 4700mV / ADC ideal code = 982
+ * vbat_low: Vin = 2380mV / ADC ideal code = 33
+ */
+
+ /* Calculate gain and offset for VMAIN if all reads succeeded */
+ if (!(ret[0] < 0 || ret[1] < 0 || ret[2] < 0)) {
+ vmain_high = (((gpadc_cal[0] & 0x03) << 8) |
+ ((gpadc_cal[1] & 0x3F) << 2) |
+ ((gpadc_cal[2] & 0xC0) >> 6));
+
+ vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
+
+ gpadc->cal_data[ADC_INPUT_VMAIN].gain = CALIB_SCALE *
+ (19500 - 315) / (vmain_high - vmain_low);
+
+ gpadc->cal_data[ADC_INPUT_VMAIN].offset = CALIB_SCALE * 19500 -
+ (CALIB_SCALE * (19500 - 315) /
+ (vmain_high - vmain_low)) * vmain_high;
+ } else {
+ gpadc->cal_data[ADC_INPUT_VMAIN].gain = 0;
+ }
+
+ /* Calculate gain and offset for BTEMP if all reads succeeded */
+ if (!(ret[2] < 0 || ret[3] < 0 || ret[4] < 0)) {
+ btemp_high = (((gpadc_cal[2] & 0x01) << 9) |
+ (gpadc_cal[3] << 1) |
+ ((gpadc_cal[4] & 0x80) >> 7));
+
+ btemp_low = ((gpadc_cal[4] & 0x7C) >> 2);
+
+ gpadc->cal_data[ADC_INPUT_BTEMP].gain =
+ CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low);
+
+ gpadc->cal_data[ADC_INPUT_BTEMP].offset = CALIB_SCALE * 1300 -
+ (CALIB_SCALE * (1300 - 21) /
+ (btemp_high - btemp_low)) * btemp_high;
+ } else {
+ gpadc->cal_data[ADC_INPUT_BTEMP].gain = 0;
+ }
+
+ /* Calculate gain and offset for VBAT if all reads succeeded */
+ if (!(ret[4] < 0 || ret[5] < 0 || ret[6] < 0)) {
+ vbat_high = (((gpadc_cal[4] & 0x03) << 8) | gpadc_cal[5]);
+ vbat_low = ((gpadc_cal[6] & 0xFC) >> 2);
+
+ gpadc->cal_data[ADC_INPUT_VBAT].gain = CALIB_SCALE *
+ (4700 - 2380) / (vbat_high - vbat_low);
+
+ gpadc->cal_data[ADC_INPUT_VBAT].offset = CALIB_SCALE * 4700 -
+ (CALIB_SCALE * (4700 - 2380) /
+ (vbat_high - vbat_low)) * vbat_high;
+ } else {
+ gpadc->cal_data[ADC_INPUT_VBAT].gain = 0;
+ }
+
+ dev_dbg(gpadc->dev, "VMAIN gain %llu offset %llu\n",
+ gpadc->cal_data[ADC_INPUT_VMAIN].gain,
+ gpadc->cal_data[ADC_INPUT_VMAIN].offset);
+
+ dev_dbg(gpadc->dev, "BTEMP gain %llu offset %llu\n",
+ gpadc->cal_data[ADC_INPUT_BTEMP].gain,
+ gpadc->cal_data[ADC_INPUT_BTEMP].offset);
+
+ dev_dbg(gpadc->dev, "VBAT gain %llu offset %llu\n",
+ gpadc->cal_data[ADC_INPUT_VBAT].gain,
+ gpadc->cal_data[ADC_INPUT_VBAT].offset);
+}
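As a sanity check on the formulas above, plugging in the ideal codes from the comment (vmain_high = 997 at 19500 mV, vmain_low = 16 at 315 mV) gives a VMAIN gain of 1000 * (19500 - 315) / (997 - 16) = 19556 (integer division) and an offset of 1000 * 19500 - 19556 * 997 = 2668, so a raw code of 997 converts back to (997 * 19556 + 2668) / 1000 = 19500 mV as expected. On real parts the high and low codes come from the OTP bits unpacked above, giving each chip its own gain/offset pair.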
+
+static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = kzalloc(sizeof(struct ab8500_gpadc), GFP_KERNEL);
+ if (!gpadc) {
+ dev_err(&pdev->dev, "Error: No memory\n");
+ return -ENOMEM;
+ }
+
+ gpadc->irq = platform_get_irq_byname(pdev, "SW_CONV_END");
+ if (gpadc->irq < 0) {
+ dev_err(gpadc->dev, "failed to get platform irq-%d\n",
+ gpadc->irq);
+ ret = gpadc->irq;
+ goto fail;
+ }
+
+ gpadc->dev = &pdev->dev;
+ mutex_init(&gpadc->ab8500_gpadc_lock);
+
+ /* Initialize completion used to notify completion of conversion */
+ init_completion(&gpadc->ab8500_gpadc_complete);
+
+ /* Register interrupt - SwAdcComplete */
+ ret = request_threaded_irq(gpadc->irq, NULL,
+ ab8500_bm_gpswadcconvend_handler,
+ IRQF_NO_SUSPEND | IRQF_SHARED, "ab8500-gpadc", gpadc);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "Failed to register interrupt, irq: %d\n",
+ gpadc->irq);
+ goto fail;
+ }
+
+ /* VTVout LDO used to power up ab8500-GPADC */
+ gpadc->regu = regulator_get(&pdev->dev, "vddadc");
+ if (IS_ERR(gpadc->regu)) {
+ ret = PTR_ERR(gpadc->regu);
+ dev_err(gpadc->dev, "failed to get vtvout LDO\n");
+ goto fail_irq;
+ }
+ ab8500_gpadc_read_calibration_data(gpadc);
+ list_add_tail(&gpadc->node, &ab8500_gpadc_list);
+ dev_dbg(gpadc->dev, "probe success\n");
+ return 0;
+fail_irq:
+ free_irq(gpadc->irq, gpadc);
+fail:
+ kfree(gpadc);
+ gpadc = NULL;
+ return ret;
+}
+
+static int __devexit ab8500_gpadc_remove(struct platform_device *pdev)
+{
+ struct ab8500_gpadc *gpadc = platform_get_drvdata(pdev);
+
+ /* remove this gpadc entry from the list */
+ list_del(&gpadc->node);
+ /* remove interrupt - completion of Sw ADC conversion */
+ free_irq(gpadc->irq, gpadc);
+ /* disable VTVout LDO that is being used by GPADC */
+ regulator_put(gpadc->regu);
+ kfree(gpadc);
+ gpadc = NULL;
+ return 0;
+}
+
+static struct platform_driver ab8500_gpadc_driver = {
+ .probe = ab8500_gpadc_probe,
+ .remove = __devexit_p(ab8500_gpadc_remove),
+ .driver = {
+ .name = "ab8500-gpadc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_gpadc_init(void)
+{
+ return platform_driver_register(&ab8500_gpadc_driver);
+}
+
+static void __exit ab8500_gpadc_exit(void)
+{
+ platform_driver_unregister(&ab8500_gpadc_driver);
+}
+
+subsys_initcall_sync(ab8500_gpadc_init);
+module_exit(ab8500_gpadc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Arun R Murthy, Daniel Willerud, Johan Palsson");
+MODULE_ALIAS("platform:ab8500_gpadc");
+MODULE_DESCRIPTION("AB8500 GPADC driver");
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
index 6820327adf4..821e6b86afd 100644
--- a/drivers/mfd/ab8500-i2c.c
+++ b/drivers/mfd/ab8500-i2c.c
@@ -97,7 +97,7 @@ static void __exit ab8500_i2c_exit(void)
{
platform_driver_unregister(&ab8500_i2c_driver);
}
-subsys_initcall(ab8500_i2c_init);
+arch_initcall(ab8500_i2c_init);
module_exit(ab8500_i2c_exit);
MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com");
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
new file mode 100644
index 00000000000..392185965b3
--- /dev/null
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> for ST Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500/sysctrl.h>
+
+static struct device *sysctrl_dev;
+
+static inline bool valid_bank(u8 bank)
+{
+ return ((bank == AB8500_SYS_CTRL1_BLOCK) ||
+ (bank == AB8500_SYS_CTRL2_BLOCK));
+}
+
+int ab8500_sysctrl_read(u16 reg, u8 *value)
+{
+ u8 bank;
+
+ if (sysctrl_dev == NULL)
+ return -EAGAIN;
+
+ bank = (reg >> 8);
+ if (!valid_bank(bank))
+ return -EINVAL;
+
+ return abx500_get_register_interruptible(sysctrl_dev, bank,
+ (u8)(reg & 0xFF), value);
+}
+
+int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
+{
+ u8 bank;
+
+ if (sysctrl_dev == NULL)
+ return -EAGAIN;
+
+ bank = (reg >> 8);
+ if (!valid_bank(bank))
+ return -EINVAL;
+
+ return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
+ (u8)(reg & 0xFF), mask, value);
+}
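Both helpers pack the bank into the high byte of the 16-bit register argument and the register offset into the low byte. A purely illustrative call (the offset 0x02 is made up):

static int example_sysctrl_read(u8 *val)
{
	/* high byte = bank, low byte = register offset within that bank */
	return ab8500_sysctrl_read((AB8500_SYS_CTRL1_BLOCK << 8) | 0x02, val);
}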
+
+static int __devinit ab8500_sysctrl_probe(struct platform_device *pdev)
+{
+ sysctrl_dev = &pdev->dev;
+ return 0;
+}
+
+static int __devexit ab8500_sysctrl_remove(struct platform_device *pdev)
+{
+ sysctrl_dev = NULL;
+ return 0;
+}
+
+static struct platform_driver ab8500_sysctrl_driver = {
+ .driver = {
+ .name = "ab8500-sysctrl",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_sysctrl_probe,
+ .remove = __devexit_p(ab8500_sysctrl_remove),
+};
+
+static int __init ab8500_sysctrl_init(void)
+{
+ return platform_driver_register(&ab8500_sysctrl_driver);
+}
+subsys_initcall(ab8500_sysctrl_init);
+
+MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com");
+MODULE_DESCRIPTION("AB8500 system control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index 3122139b430..f1d88483112 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -321,27 +321,27 @@ static int __devexit adp5520_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int adp5520_suspend(struct i2c_client *client,
- pm_message_t state)
+static int adp5520_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
adp5520_clr_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
return 0;
}
-static int adp5520_resume(struct i2c_client *client)
+static int adp5520_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
adp5520_set_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
return 0;
}
-#else
-#define adp5520_suspend NULL
-#define adp5520_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(adp5520_pm, adp5520_suspend, adp5520_resume);
+
static const struct i2c_device_id adp5520_id[] = {
{ "pmic-adp5520", ID_ADP5520 },
{ "pmic-adp5501", ID_ADP5501 },
@@ -353,11 +353,10 @@ static struct i2c_driver adp5520_driver = {
.driver = {
.name = "adp5520",
.owner = THIS_MODULE,
+ .pm = &adp5520_pm,
},
.probe = adp5520_probe,
.remove = __devexit_p(adp5520_remove),
- .suspend = adp5520_suspend,
- .resume = adp5520_resume,
.id_table = adp5520_id,
};
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index c45e6305b26..0241f08fc00 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -682,7 +682,7 @@ static struct mfd_cell asic3_cell_ds1wm = {
.name = "ds1wm",
.enable = ds1wm_enable,
.disable = ds1wm_disable,
- .driver_data = &ds1wm_pdata,
+ .mfd_data = &ds1wm_pdata,
.num_resources = ARRAY_SIZE(ds1wm_resources),
.resources = ds1wm_resources,
};
@@ -783,7 +783,7 @@ static struct mfd_cell asic3_cell_mmc = {
.name = "tmio-mmc",
.enable = asic3_mmc_enable,
.disable = asic3_mmc_disable,
- .driver_data = &asic3_mmc_data,
+ .mfd_data = &asic3_mmc_data,
.num_resources = ARRAY_SIZE(asic3_mmc_resources),
.resources = asic3_mmc_resources,
};
@@ -810,9 +810,6 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
ds1wm_resources[0].start >>= asic->bus_shift;
ds1wm_resources[0].end >>= asic->bus_shift;
- asic3_cell_ds1wm.platform_data = &asic3_cell_ds1wm;
- asic3_cell_ds1wm.data_size = sizeof(asic3_cell_ds1wm);
-
/* MMC */
asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) +
mem_sdio->start, 0x400 >> asic->bus_shift);
@@ -824,9 +821,6 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
asic3_mmc_resources[0].start >>= asic->bus_shift;
asic3_mmc_resources[0].end >>= asic->bus_shift;
- asic3_cell_mmc.platform_data = &asic3_cell_mmc;
- asic3_cell_mmc.data_size = sizeof(asic3_cell_mmc);
-
ret = mfd_add_devices(&pdev->dev, pdev->id,
&asic3_cell_ds1wm, 1, mem, asic->irq_base);
if (ret < 0)
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index 59ca6f151e7..886a0687106 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -39,6 +39,37 @@ enum cs5535_mfd_bars {
NR_BARS,
};
+static int cs5535_mfd_res_enable(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't fetch device resource info\n");
+ return -EIO;
+ }
+
+ if (!request_region(res->start, resource_size(res), DRV_NAME)) {
+ dev_err(&pdev->dev, "can't request region\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int cs5535_mfd_res_disable(struct platform_device *pdev)
+{
+ struct resource *res;
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't fetch device resource info\n");
+ return -EIO;
+ }
+
+ release_region(res->start, resource_size(res));
+ return 0;
+}
+
static __devinitdata struct resource cs5535_mfd_resources[NR_BARS];
static __devinitdata struct mfd_cell cs5535_mfd_cells[] = {
@@ -65,12 +96,18 @@ static __devinitdata struct mfd_cell cs5535_mfd_cells[] = {
.name = "cs5535-pms",
.num_resources = 1,
.resources = &cs5535_mfd_resources[PMS_BAR],
+
+ .enable = cs5535_mfd_res_enable,
+ .disable = cs5535_mfd_res_disable,
},
{
.id = ACPI_BAR,
.name = "cs5535-acpi",
.num_resources = 1,
.resources = &cs5535_mfd_resources[ACPI_BAR],
+
+ .enable = cs5535_mfd_res_enable,
+ .disable = cs5535_mfd_res_disable,
},
};
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index fdd8a1b8bc6..414783b0484 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -119,12 +119,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
/* Voice codec interface client */
cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
cell->name = "davinci-vcif";
- cell->driver_data = davinci_vc;
+ cell->mfd_data = davinci_vc;
/* Voice codec CQ93VC client */
cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
cell->name = "cq93vc-codec";
- cell->driver_data = davinci_vc;
+ cell->mfd_data = davinci_vc;
ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
DAVINCI_VC_CELLS, NULL, 0);
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index 7bc752272dc..fb9770b39a3 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -117,7 +117,7 @@ static struct mfd_cell ds1wm_cell __initdata = {
.name = "ds1wm",
.enable = ds1wm_enable,
.disable = ds1wm_disable,
- .driver_data = &ds1wm_pdata,
+ .mfd_data = &ds1wm_pdata,
.num_resources = 2,
.resources = ds1wm_resources,
};
@@ -165,8 +165,6 @@ static int __init pasic3_probe(struct platform_device *pdev)
ds1wm_pdata.clock_rate = pdata->clock_rate;
/* the first 5 PASIC3 registers control the DS1WM */
ds1wm_resources[0].end = (5 << asic->bus_shift) - 1;
- ds1wm_cell.platform_data = &ds1wm_cell;
- ds1wm_cell.data_size = sizeof(ds1wm_cell);
ret = mfd_add_devices(&pdev->dev, pdev->id,
&ds1wm_cell, 1, r, irq);
if (ret < 0)
@@ -174,9 +172,6 @@ static int __init pasic3_probe(struct platform_device *pdev)
}
if (pdata && pdata->led_pdata) {
- led_cell.driver_data = pdata->led_pdata;
- led_cell.platform_data = &led_cell;
- led_cell.data_size = sizeof(ds1wm_cell);
ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r, 0);
if (ret < 0)
dev_warn(dev, "failed to register LED device\n");
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index 36a166bcdb0..fc4191137e9 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -86,8 +86,7 @@ static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv,
/* Add platform data */
pdata->modno = modno;
- cell->platform_data = pdata;
- cell->data_size = sizeof(*pdata);
+ cell->mfd_data = pdata;
/* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */
res->flags = IORESOURCE_MEM;
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 0cc59795f60..aa518b9beaf 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -232,8 +232,6 @@ const struct mfd_cell jz4740_adc_cells[] = {
.name = "jz4740-hwmon",
.num_resources = ARRAY_SIZE(jz4740_hwmon_resources),
.resources = jz4740_hwmon_resources,
- .platform_data = (void *)&jz4740_adc_cells[0],
- .data_size = sizeof(struct mfd_cell),
.enable = jz4740_adc_cell_enable,
.disable = jz4740_adc_cell_disable,
@@ -243,8 +241,6 @@ const struct mfd_cell jz4740_adc_cells[] = {
.name = "jz4740-battery",
.num_resources = ARRAY_SIZE(jz4740_battery_resources),
.resources = jz4740_battery_resources,
- .platform_data = (void *)&jz4740_adc_cells[1],
- .data_size = sizeof(struct mfd_cell),
.enable = jz4740_adc_cell_enable,
.disable = jz4740_adc_cell_disable,
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
new file mode 100644
index 00000000000..5d1fca0277e
--- /dev/null
+++ b/drivers/mfd/max8997.c
@@ -0,0 +1,427 @@
+/*
+ * max8997.c - mfd core driver for the Maxim 8966 and 8997
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8998.c
+ */
+
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+
+#define I2C_ADDR_PMIC (0xCC >> 1)
+#define I2C_ADDR_MUIC (0x4A >> 1)
+#define I2C_ADDR_BATTERY (0x6C >> 1)
+#define I2C_ADDR_RTC (0x0C >> 1)
+#define I2C_ADDR_HAPTIC (0x90 >> 1)
+
+static struct mfd_cell max8997_devs[] = {
+ { .name = "max8997-pmic", },
+ { .name = "max8997-rtc", },
+ { .name = "max8997-battery", },
+ { .name = "max8997-haptic", },
+ { .name = "max8997-muic", },
+ { .name = "max8997-flash", },
+};
+
+int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
+{
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&max8997->iolock);
+ ret = i2c_smbus_read_byte_data(i2c, reg);
+ mutex_unlock(&max8997->iolock);
+ if (ret < 0)
+ return ret;
+
+ ret &= 0xff;
+ *dest = ret;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max8997_read_reg);
+
+int max8997_bulk_read(struct i2c_client *i2c, u8 reg, int count, u8 *buf)
+{
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&max8997->iolock);
+ ret = i2c_smbus_read_i2c_block_data(i2c, reg, count, buf);
+ mutex_unlock(&max8997->iolock);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max8997_bulk_read);
+
+int max8997_write_reg(struct i2c_client *i2c, u8 reg, u8 value)
+{
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&max8997->iolock);
+ ret = i2c_smbus_write_byte_data(i2c, reg, value);
+ mutex_unlock(&max8997->iolock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max8997_write_reg);
+
+int max8997_bulk_write(struct i2c_client *i2c, u8 reg, int count, u8 *buf)
+{
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&max8997->iolock);
+ ret = i2c_smbus_write_i2c_block_data(i2c, reg, count, buf);
+ mutex_unlock(&max8997->iolock);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max8997_bulk_write);
+
+int max8997_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask)
+{
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&max8997->iolock);
+ ret = i2c_smbus_read_byte_data(i2c, reg);
+ if (ret >= 0) {
+ u8 old_val = ret & 0xff;
+ u8 new_val = (val & mask) | (old_val & (~mask));
+ ret = i2c_smbus_write_byte_data(i2c, reg, new_val);
+ }
+ mutex_unlock(&max8997->iolock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max8997_update_reg);
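max8997_update_reg() is the read-modify-write helper: only the bits set in "mask" are replaced by "val", everything else in the register is preserved under iolock. An illustrative call (the register offset and bit values are placeholders, not real MAX8997 definitions):

static int example_update(struct i2c_client *i2c)
{
	/* set bits [1:0] of a hypothetical control register to 01 */
	return max8997_update_reg(i2c, 0x18, 0x01, 0x03);
}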
+
+static int max8997_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct max8997_dev *max8997;
+ struct max8997_platform_data *pdata = i2c->dev.platform_data;
+ int ret = 0;
+
+ max8997 = kzalloc(sizeof(struct max8997_dev), GFP_KERNEL);
+ if (max8997 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, max8997);
+ max8997->dev = &i2c->dev;
+ max8997->i2c = i2c;
+ max8997->type = id->driver_data;
+
+ if (!pdata)
+ goto err;
+
+ max8997->wakeup = pdata->wakeup;
+
+ mutex_init(&max8997->iolock);
+
+ max8997->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
+ i2c_set_clientdata(max8997->rtc, max8997);
+ max8997->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
+ i2c_set_clientdata(max8997->haptic, max8997);
+ max8997->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
+ i2c_set_clientdata(max8997->muic, max8997);
+
+ pm_runtime_set_active(max8997->dev);
+
+ ret = mfd_add_devices(max8997->dev, -1, max8997_devs,
+ ARRAY_SIZE(max8997_devs),
+ NULL, 0);
+
+ /*
+ * TODO: enable others (flash, muic, rtc, battery, ...) and
+ * check the return value
+ */
+
+ if (ret < 0)
+ goto err_mfd;
+
+ return ret;
+
+err_mfd:
+ mfd_remove_devices(max8997->dev);
+ i2c_unregister_device(max8997->muic);
+ i2c_unregister_device(max8997->haptic);
+ i2c_unregister_device(max8997->rtc);
+err:
+ kfree(max8997);
+ return ret;
+}
+
+static int max8997_i2c_remove(struct i2c_client *i2c)
+{
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(max8997->dev);
+ i2c_unregister_device(max8997->muic);
+ i2c_unregister_device(max8997->haptic);
+ i2c_unregister_device(max8997->rtc);
+ kfree(max8997);
+
+ return 0;
+}
+
+static const struct i2c_device_id max8997_i2c_id[] = {
+ { "max8997", TYPE_MAX8997 },
+ { "max8966", TYPE_MAX8966 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max8997_i2c_id);
+
+u8 max8997_dumpaddr_pmic[] = {
+ MAX8997_REG_INT1MSK,
+ MAX8997_REG_INT2MSK,
+ MAX8997_REG_INT3MSK,
+ MAX8997_REG_INT4MSK,
+ MAX8997_REG_MAINCON1,
+ MAX8997_REG_MAINCON2,
+ MAX8997_REG_BUCKRAMP,
+ MAX8997_REG_BUCK1CTRL,
+ MAX8997_REG_BUCK1DVS1,
+ MAX8997_REG_BUCK1DVS2,
+ MAX8997_REG_BUCK1DVS3,
+ MAX8997_REG_BUCK1DVS4,
+ MAX8997_REG_BUCK1DVS5,
+ MAX8997_REG_BUCK1DVS6,
+ MAX8997_REG_BUCK1DVS7,
+ MAX8997_REG_BUCK1DVS8,
+ MAX8997_REG_BUCK2CTRL,
+ MAX8997_REG_BUCK2DVS1,
+ MAX8997_REG_BUCK2DVS2,
+ MAX8997_REG_BUCK2DVS3,
+ MAX8997_REG_BUCK2DVS4,
+ MAX8997_REG_BUCK2DVS5,
+ MAX8997_REG_BUCK2DVS6,
+ MAX8997_REG_BUCK2DVS7,
+ MAX8997_REG_BUCK2DVS8,
+ MAX8997_REG_BUCK3CTRL,
+ MAX8997_REG_BUCK3DVS,
+ MAX8997_REG_BUCK4CTRL,
+ MAX8997_REG_BUCK4DVS,
+ MAX8997_REG_BUCK5CTRL,
+ MAX8997_REG_BUCK5DVS1,
+ MAX8997_REG_BUCK5DVS2,
+ MAX8997_REG_BUCK5DVS3,
+ MAX8997_REG_BUCK5DVS4,
+ MAX8997_REG_BUCK5DVS5,
+ MAX8997_REG_BUCK5DVS6,
+ MAX8997_REG_BUCK5DVS7,
+ MAX8997_REG_BUCK5DVS8,
+ MAX8997_REG_BUCK6CTRL,
+ MAX8997_REG_BUCK6BPSKIPCTRL,
+ MAX8997_REG_BUCK7CTRL,
+ MAX8997_REG_BUCK7DVS,
+ MAX8997_REG_LDO1CTRL,
+ MAX8997_REG_LDO2CTRL,
+ MAX8997_REG_LDO3CTRL,
+ MAX8997_REG_LDO4CTRL,
+ MAX8997_REG_LDO5CTRL,
+ MAX8997_REG_LDO6CTRL,
+ MAX8997_REG_LDO7CTRL,
+ MAX8997_REG_LDO8CTRL,
+ MAX8997_REG_LDO9CTRL,
+ MAX8997_REG_LDO10CTRL,
+ MAX8997_REG_LDO11CTRL,
+ MAX8997_REG_LDO12CTRL,
+ MAX8997_REG_LDO13CTRL,
+ MAX8997_REG_LDO14CTRL,
+ MAX8997_REG_LDO15CTRL,
+ MAX8997_REG_LDO16CTRL,
+ MAX8997_REG_LDO17CTRL,
+ MAX8997_REG_LDO18CTRL,
+ MAX8997_REG_LDO21CTRL,
+ MAX8997_REG_MBCCTRL1,
+ MAX8997_REG_MBCCTRL2,
+ MAX8997_REG_MBCCTRL3,
+ MAX8997_REG_MBCCTRL4,
+ MAX8997_REG_MBCCTRL5,
+ MAX8997_REG_MBCCTRL6,
+ MAX8997_REG_OTPCGHCVS,
+ MAX8997_REG_SAFEOUTCTRL,
+ MAX8997_REG_LBCNFG1,
+ MAX8997_REG_LBCNFG2,
+ MAX8997_REG_BBCCTRL,
+
+ MAX8997_REG_FLASH1_CUR,
+ MAX8997_REG_FLASH2_CUR,
+ MAX8997_REG_MOVIE_CUR,
+ MAX8997_REG_GSMB_CUR,
+ MAX8997_REG_BOOST_CNTL,
+ MAX8997_REG_LEN_CNTL,
+ MAX8997_REG_FLASH_CNTL,
+ MAX8997_REG_WDT_CNTL,
+ MAX8997_REG_MAXFLASH1,
+ MAX8997_REG_MAXFLASH2,
+ MAX8997_REG_FLASHSTATUSMASK,
+
+ MAX8997_REG_GPIOCNTL1,
+ MAX8997_REG_GPIOCNTL2,
+ MAX8997_REG_GPIOCNTL3,
+ MAX8997_REG_GPIOCNTL4,
+ MAX8997_REG_GPIOCNTL5,
+ MAX8997_REG_GPIOCNTL6,
+ MAX8997_REG_GPIOCNTL7,
+ MAX8997_REG_GPIOCNTL8,
+ MAX8997_REG_GPIOCNTL9,
+ MAX8997_REG_GPIOCNTL10,
+ MAX8997_REG_GPIOCNTL11,
+ MAX8997_REG_GPIOCNTL12,
+
+ MAX8997_REG_LDO1CONFIG,
+ MAX8997_REG_LDO2CONFIG,
+ MAX8997_REG_LDO3CONFIG,
+ MAX8997_REG_LDO4CONFIG,
+ MAX8997_REG_LDO5CONFIG,
+ MAX8997_REG_LDO6CONFIG,
+ MAX8997_REG_LDO7CONFIG,
+ MAX8997_REG_LDO8CONFIG,
+ MAX8997_REG_LDO9CONFIG,
+ MAX8997_REG_LDO10CONFIG,
+ MAX8997_REG_LDO11CONFIG,
+ MAX8997_REG_LDO12CONFIG,
+ MAX8997_REG_LDO13CONFIG,
+ MAX8997_REG_LDO14CONFIG,
+ MAX8997_REG_LDO15CONFIG,
+ MAX8997_REG_LDO16CONFIG,
+ MAX8997_REG_LDO17CONFIG,
+ MAX8997_REG_LDO18CONFIG,
+ MAX8997_REG_LDO21CONFIG,
+
+ MAX8997_REG_DVSOKTIMER1,
+ MAX8997_REG_DVSOKTIMER2,
+ MAX8997_REG_DVSOKTIMER4,
+ MAX8997_REG_DVSOKTIMER5,
+};
+
+u8 max8997_dumpaddr_muic[] = {
+ MAX8997_MUIC_REG_INTMASK1,
+ MAX8997_MUIC_REG_INTMASK2,
+ MAX8997_MUIC_REG_INTMASK3,
+ MAX8997_MUIC_REG_CDETCTRL,
+ MAX8997_MUIC_REG_CONTROL1,
+ MAX8997_MUIC_REG_CONTROL2,
+ MAX8997_MUIC_REG_CONTROL3,
+};
+
+u8 max8997_dumpaddr_haptic[] = {
+ MAX8997_HAPTIC_REG_CONF1,
+ MAX8997_HAPTIC_REG_CONF2,
+ MAX8997_HAPTIC_REG_DRVCONF,
+ MAX8997_HAPTIC_REG_CYCLECONF1,
+ MAX8997_HAPTIC_REG_CYCLECONF2,
+ MAX8997_HAPTIC_REG_SIGCONF1,
+ MAX8997_HAPTIC_REG_SIGCONF2,
+ MAX8997_HAPTIC_REG_SIGCONF3,
+ MAX8997_HAPTIC_REG_SIGCONF4,
+ MAX8997_HAPTIC_REG_SIGDC1,
+ MAX8997_HAPTIC_REG_SIGDC2,
+ MAX8997_HAPTIC_REG_SIGPWMDC1,
+ MAX8997_HAPTIC_REG_SIGPWMDC2,
+ MAX8997_HAPTIC_REG_SIGPWMDC3,
+ MAX8997_HAPTIC_REG_SIGPWMDC4,
+};
+
+static int max8997_freeze(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_pmic); i++)
+ max8997_read_reg(i2c, max8997_dumpaddr_pmic[i],
+ &max8997->reg_dump[i]);
+
+ for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_muic); i++)
+ max8997_read_reg(i2c, max8997_dumpaddr_muic[i],
+ &max8997->reg_dump[i + MAX8997_REG_PMIC_END]);
+
+ for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_haptic); i++)
+ max8997_read_reg(i2c, max8997_dumpaddr_haptic[i],
+ &max8997->reg_dump[i + MAX8997_REG_PMIC_END +
+ MAX8997_MUIC_REG_END]);
+
+ return 0;
+}
+
+static int max8997_restore(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_pmic); i++)
+ max8997_write_reg(i2c, max8997_dumpaddr_pmic[i],
+ max8997->reg_dump[i]);
+
+ for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_muic); i++)
+ max8997_write_reg(i2c, max8997_dumpaddr_muic[i],
+ max8997->reg_dump[i + MAX8997_REG_PMIC_END]);
+
+ for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_haptic); i++)
+ max8997_write_reg(i2c, max8997_dumpaddr_haptic[i],
+ max8997->reg_dump[i + MAX8997_REG_PMIC_END +
+ MAX8997_MUIC_REG_END]);
+
+ return 0;
+}
+
+const struct dev_pm_ops max8997_pm = {
+ .freeze = max8997_freeze,
+ .restore = max8997_restore,
+};
+
+static struct i2c_driver max8997_i2c_driver = {
+ .driver = {
+ .name = "max8997",
+ .owner = THIS_MODULE,
+ .pm = &max8997_pm,
+ },
+ .probe = max8997_i2c_probe,
+ .remove = max8997_i2c_remove,
+ .id_table = max8997_i2c_id,
+};
+
+static int __init max8997_i2c_init(void)
+{
+ return i2c_add_driver(&max8997_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(max8997_i2c_init);
+
+static void __exit max8997_i2c_exit(void)
+{
+ i2c_del_driver(&max8997_i2c_driver);
+}
+module_exit(max8997_i2c_exit);
+
+MODULE_DESCRIPTION("MAXIM 8997 multi-function core driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index bbfe8673260..c00214257da 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -233,7 +233,7 @@ struct max8998_reg_dump {
u8 val;
};
#define SAVE_ITEM(x) { .addr = (x), .val = 0x0, }
-struct max8998_reg_dump max8998_dump[] = {
+static struct max8998_reg_dump max8998_dump[] = {
SAVE_ITEM(MAX8998_REG_IRQM1),
SAVE_ITEM(MAX8998_REG_IRQM2),
SAVE_ITEM(MAX8998_REG_IRQM3),
@@ -298,7 +298,7 @@ static int max8998_restore(struct device *dev)
return 0;
}
-const struct dev_pm_ops max8998_pm = {
+static const struct dev_pm_ops max8998_pm = {
.suspend = max8998_suspend,
.resume = max8998_resume,
.freeze = max8998_freeze,
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index b9fcaf0004d..668634e89e8 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -683,14 +683,13 @@ out:
EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion);
static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
- const char *format, void *pdata, size_t pdata_size)
+ const char *format, void *pdata)
{
char buf[30];
const char *name = mc13xxx_get_chipname(mc13xxx);
struct mfd_cell cell = {
- .platform_data = pdata,
- .data_size = pdata_size,
+ .mfd_data = pdata,
};
/* there is no asnprintf in the kernel :-( */
@@ -706,7 +705,7 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
{
- return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL, 0);
+ return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL);
}
static int mc13xxx_probe(struct spi_device *spi)
@@ -764,13 +763,8 @@ err_revision:
mc13xxx_add_subdevice(mc13xxx, "%s-codec");
if (pdata->flags & MC13XXX_USE_REGULATOR) {
- struct mc13xxx_regulator_platform_data regulator_pdata = {
- .num_regulators = pdata->num_regulators,
- .regulators = pdata->regulators,
- };
-
mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator",
- &regulator_pdata, sizeof(regulator_pdata));
+ &pdata->regulators);
}
if (pdata->flags & MC13XXX_USE_RTC)
@@ -779,10 +773,8 @@ err_revision:
if (pdata->flags & MC13XXX_USE_TOUCHSCREEN)
mc13xxx_add_subdevice(mc13xxx, "%s-ts");
- if (pdata->flags & MC13XXX_USE_LED) {
- mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led",
- pdata->leds, sizeof(*pdata->leds));
- }
+ if (pdata->flags & MC13XXX_USE_LED)
+ mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led", pdata->leds);
return 0;
}
@@ -811,6 +803,7 @@ static const struct spi_device_id mc13xxx_device_id[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
static struct spi_driver mc13xxx_driver = {
.id_table = mc13xxx_device_id,
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index d83ad0f141a..79eda0264fb 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -18,6 +18,43 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+int mfd_cell_enable(struct platform_device *pdev)
+{
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ int err = 0;
+
+ /* only call enable hook if the cell wasn't previously enabled */
+ if (atomic_inc_return(cell->usage_count) == 1)
+ err = cell->enable(pdev);
+
+ /* if the enable hook failed, decrement counter to allow retries */
+ if (err)
+ atomic_dec(cell->usage_count);
+
+ return err;
+}
+EXPORT_SYMBOL(mfd_cell_enable);
+
+int mfd_cell_disable(struct platform_device *pdev)
+{
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ int err = 0;
+
+ /* only disable if no other clients are using it */
+ if (atomic_dec_return(cell->usage_count) == 0)
+ err = cell->disable(pdev);
+
+ /* if the disable hook failed, increment to allow retries */
+ if (err)
+ atomic_inc(cell->usage_count);
+
+ /* sanity check; did someone call disable too many times? */
+ WARN_ON(atomic_read(cell->usage_count) < 0);
+
+ return err;
+}
+EXPORT_SYMBOL(mfd_cell_disable);
+
static int mfd_add_device(struct device *parent, int id,
const struct mfd_cell *cell,
struct resource *mem_base,
@@ -37,14 +74,10 @@ static int mfd_add_device(struct device *parent, int id,
goto fail_device;
pdev->dev.parent = parent;
- platform_set_drvdata(pdev, cell->driver_data);
- if (cell->data_size) {
- ret = platform_device_add_data(pdev,
- cell->platform_data, cell->data_size);
- if (ret)
- goto fail_res;
- }
+ ret = platform_device_add_data(pdev, cell, sizeof(*cell));
+ if (ret)
+ goto fail_res;
for (r = 0; r < cell->num_resources; r++) {
res[r].name = cell->resources[r].name;
@@ -100,14 +133,22 @@ fail_alloc:
}
int mfd_add_devices(struct device *parent, int id,
- const struct mfd_cell *cells, int n_devs,
+ struct mfd_cell *cells, int n_devs,
struct resource *mem_base,
int irq_base)
{
int i;
int ret = 0;
+ atomic_t *cnts;
+
+ /* initialize reference counting for all cells */
+ cnts = kcalloc(n_devs, sizeof(*cnts), GFP_KERNEL);
+ if (!cnts)
+ return -ENOMEM;
for (i = 0; i < n_devs; i++) {
+ atomic_set(&cnts[i], 0);
+ cells[i].usage_count = &cnts[i];
ret = mfd_add_device(parent, id, cells + i, mem_base, irq_base);
if (ret)
break;
@@ -120,17 +161,89 @@ int mfd_add_devices(struct device *parent, int id,
}
EXPORT_SYMBOL(mfd_add_devices);
-static int mfd_remove_devices_fn(struct device *dev, void *unused)
+static int mfd_remove_devices_fn(struct device *dev, void *c)
{
- platform_device_unregister(to_platform_device(dev));
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ atomic_t **usage_count = c;
+
+ /* find the base address of usage_count pointers (for freeing) */
+ if (!*usage_count || (cell->usage_count < *usage_count))
+ *usage_count = cell->usage_count;
+
+ platform_device_unregister(pdev);
return 0;
}
void mfd_remove_devices(struct device *parent)
{
- device_for_each_child(parent, NULL, mfd_remove_devices_fn);
+ atomic_t *cnts = NULL;
+
+ device_for_each_child(parent, &cnts, mfd_remove_devices_fn);
+ kfree(cnts);
}
EXPORT_SYMBOL(mfd_remove_devices);
+static int add_shared_platform_device(const char *cell, const char *name)
+{
+ struct mfd_cell cell_entry;
+ struct device *dev;
+ struct platform_device *pdev;
+ int err;
+
+ /* check if we've already registered a device (don't fail if we have) */
+ if (bus_find_device_by_name(&platform_bus_type, NULL, name))
+ return 0;
+
+ /* fetch the parent cell's device (should already be registered!) */
+ dev = bus_find_device_by_name(&platform_bus_type, NULL, cell);
+ if (!dev) {
+ printk(KERN_ERR "failed to find device for cell %s\n", cell);
+ return -ENODEV;
+ }
+ pdev = to_platform_device(dev);
+ memcpy(&cell_entry, mfd_get_cell(pdev), sizeof(cell_entry));
+
+ WARN_ON(!cell_entry.enable);
+
+ cell_entry.name = name;
+ err = mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0);
+ if (err)
+ dev_err(dev, "MFD add devices failed: %d\n", err);
+ return err;
+}
+
+int mfd_shared_platform_driver_register(struct platform_driver *drv,
+ const char *cellname)
+{
+ int err;
+
+ err = add_shared_platform_device(cellname, drv->driver.name);
+ if (err)
+ printk(KERN_ERR "failed to add platform device %s\n",
+ drv->driver.name);
+
+ err = platform_driver_register(drv);
+ if (err)
+ printk(KERN_ERR "failed to add platform driver %s\n",
+ drv->driver.name);
+
+ return err;
+}
+EXPORT_SYMBOL(mfd_shared_platform_driver_register);
+
+void mfd_shared_platform_driver_unregister(struct platform_driver *drv)
+{
+ struct device *dev;
+
+ dev = bus_find_device_by_name(&platform_bus_type, NULL,
+ drv->driver.name);
+ if (dev)
+ platform_device_unregister(to_platform_device(dev));
+
+ platform_driver_unregister(drv);
+}
+EXPORT_SYMBOL(mfd_shared_platform_driver_unregister);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
new file mode 100644
index 00000000000..47a96708e54
--- /dev/null
+++ b/drivers/mfd/omap-usb-host.c
@@ -0,0 +1,1061 @@
+/**
+ * omap-usb-host.c - The USBHS core driver for OMAP EHCI & OHCI
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Keshava Munegowda <keshava_mgowda@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <plat/usb.h>
+
+#define USBHS_DRIVER_NAME "usbhs-omap"
+#define OMAP_EHCI_DEVICE "ehci-omap"
+#define OMAP_OHCI_DEVICE "ohci-omap3"
+
+/* OMAP USBHOST Register addresses */
+
+/* TLL Register Set */
+#define OMAP_USBTLL_REVISION (0x00)
+#define OMAP_USBTLL_SYSCONFIG (0x10)
+#define OMAP_USBTLL_SYSCONFIG_CACTIVITY (1 << 8)
+#define OMAP_USBTLL_SYSCONFIG_SIDLEMODE (1 << 3)
+#define OMAP_USBTLL_SYSCONFIG_ENAWAKEUP (1 << 2)
+#define OMAP_USBTLL_SYSCONFIG_SOFTRESET (1 << 1)
+#define OMAP_USBTLL_SYSCONFIG_AUTOIDLE (1 << 0)
+
+#define OMAP_USBTLL_SYSSTATUS (0x14)
+#define OMAP_USBTLL_SYSSTATUS_RESETDONE (1 << 0)
+
+#define OMAP_USBTLL_IRQSTATUS (0x18)
+#define OMAP_USBTLL_IRQENABLE (0x1C)
+
+#define OMAP_TLL_SHARED_CONF (0x30)
+#define OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN (1 << 6)
+#define OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN (1 << 5)
+#define OMAP_TLL_SHARED_CONF_USB_DIVRATION (1 << 2)
+#define OMAP_TLL_SHARED_CONF_FCLK_REQ (1 << 1)
+#define OMAP_TLL_SHARED_CONF_FCLK_IS_ON (1 << 0)
+
+#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num)
+#define OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT 24
+#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11)
+#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10)
+#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9)
+#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8)
+#define OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS (1 << 1)
+#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0)
+
+#define OMAP_TLL_FSLSMODE_6PIN_PHY_DAT_SE0 0x0
+#define OMAP_TLL_FSLSMODE_6PIN_PHY_DP_DM 0x1
+#define OMAP_TLL_FSLSMODE_3PIN_PHY 0x2
+#define OMAP_TLL_FSLSMODE_4PIN_PHY 0x3
+#define OMAP_TLL_FSLSMODE_6PIN_TLL_DAT_SE0 0x4
+#define OMAP_TLL_FSLSMODE_6PIN_TLL_DP_DM 0x5
+#define OMAP_TLL_FSLSMODE_3PIN_TLL 0x6
+#define OMAP_TLL_FSLSMODE_4PIN_TLL 0x7
+#define OMAP_TLL_FSLSMODE_2PIN_TLL_DAT_SE0 0xA
+#define OMAP_TLL_FSLSMODE_2PIN_DAT_DP_DM 0xB
+
+#define OMAP_TLL_ULPI_FUNCTION_CTRL(num) (0x804 + 0x100 * num)
+#define OMAP_TLL_ULPI_INTERFACE_CTRL(num) (0x807 + 0x100 * num)
+#define OMAP_TLL_ULPI_OTG_CTRL(num) (0x80A + 0x100 * num)
+#define OMAP_TLL_ULPI_INT_EN_RISE(num) (0x80D + 0x100 * num)
+#define OMAP_TLL_ULPI_INT_EN_FALL(num) (0x810 + 0x100 * num)
+#define OMAP_TLL_ULPI_INT_STATUS(num) (0x813 + 0x100 * num)
+#define OMAP_TLL_ULPI_INT_LATCH(num) (0x814 + 0x100 * num)
+#define OMAP_TLL_ULPI_DEBUG(num) (0x815 + 0x100 * num)
+#define OMAP_TLL_ULPI_SCRATCH_REGISTER(num) (0x816 + 0x100 * num)
+
+#define OMAP_TLL_CHANNEL_COUNT 3
+#define OMAP_TLL_CHANNEL_1_EN_MASK (1 << 0)
+#define OMAP_TLL_CHANNEL_2_EN_MASK (1 << 1)
+#define OMAP_TLL_CHANNEL_3_EN_MASK (1 << 2)
+
+/* UHH Register Set */
+#define OMAP_UHH_REVISION (0x00)
+#define OMAP_UHH_SYSCONFIG (0x10)
+#define OMAP_UHH_SYSCONFIG_MIDLEMODE (1 << 12)
+#define OMAP_UHH_SYSCONFIG_CACTIVITY (1 << 8)
+#define OMAP_UHH_SYSCONFIG_SIDLEMODE (1 << 3)
+#define OMAP_UHH_SYSCONFIG_ENAWAKEUP (1 << 2)
+#define OMAP_UHH_SYSCONFIG_SOFTRESET (1 << 1)
+#define OMAP_UHH_SYSCONFIG_AUTOIDLE (1 << 0)
+
+#define OMAP_UHH_SYSSTATUS (0x14)
+#define OMAP_UHH_HOSTCONFIG (0x40)
+#define OMAP_UHH_HOSTCONFIG_ULPI_BYPASS (1 << 0)
+#define OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS (1 << 0)
+#define OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS (1 << 11)
+#define OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS (1 << 12)
+#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN (1 << 2)
+#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN (1 << 3)
+#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN (1 << 4)
+#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN (1 << 5)
+#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS (1 << 8)
+#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9)
+#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10)
+#define OMAP4_UHH_HOSTCONFIG_APP_START_CLK (1 << 31)
+
+/* OMAP4-specific defines */
+#define OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR (3 << 2)
+#define OMAP4_UHH_SYSCONFIG_NOIDLE (1 << 2)
+#define OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR (3 << 4)
+#define OMAP4_UHH_SYSCONFIG_NOSTDBY (1 << 4)
+#define OMAP4_UHH_SYSCONFIG_SOFTRESET (1 << 0)
+
+#define OMAP4_P1_MODE_CLEAR (3 << 16)
+#define OMAP4_P1_MODE_TLL (1 << 16)
+#define OMAP4_P1_MODE_HSIC (3 << 16)
+#define OMAP4_P2_MODE_CLEAR (3 << 18)
+#define OMAP4_P2_MODE_TLL (1 << 18)
+#define OMAP4_P2_MODE_HSIC (3 << 18)
+
+#define OMAP_REV2_TLL_CHANNEL_COUNT 2
+
+#define OMAP_UHH_DEBUG_CSR (0x44)
+
+/* Values of UHH_REVISION - Note: these are not given in the TRM */
+#define OMAP_USBHS_REV1 0x00000010 /* OMAP3 */
+#define OMAP_USBHS_REV2 0x50700100 /* OMAP4 */
+
+#define is_omap_usbhs_rev1(x) ((x)->usbhs_rev == OMAP_USBHS_REV1)
+#define is_omap_usbhs_rev2(x) ((x)->usbhs_rev == OMAP_USBHS_REV2)
+
+#define is_ehci_phy_mode(x) ((x) == OMAP_EHCI_PORT_MODE_PHY)
+#define is_ehci_tll_mode(x) ((x) == OMAP_EHCI_PORT_MODE_TLL)
+#define is_ehci_hsic_mode(x) ((x) == OMAP_EHCI_PORT_MODE_HSIC)
+
+
+struct usbhs_hcd_omap {
+ struct clk *usbhost_ick;
+ struct clk *usbhost_hs_fck;
+ struct clk *usbhost_fs_fck;
+ struct clk *xclk60mhsp1_ck;
+ struct clk *xclk60mhsp2_ck;
+ struct clk *utmi_p1_fck;
+ struct clk *usbhost_p1_fck;
+ struct clk *usbtll_p1_fck;
+ struct clk *utmi_p2_fck;
+ struct clk *usbhost_p2_fck;
+ struct clk *usbtll_p2_fck;
+ struct clk *init_60m_fclk;
+ struct clk *usbtll_fck;
+ struct clk *usbtll_ick;
+
+ void __iomem *uhh_base;
+ void __iomem *tll_base;
+
+ struct usbhs_omap_platform_data platdata;
+
+ u32 usbhs_rev;
+ spinlock_t lock;
+ int count;
+};
+/*-------------------------------------------------------------------------*/
+
+const char usbhs_driver_name[] = USBHS_DRIVER_NAME;
+static u64 usbhs_dmamask = ~(u32)0;
+
+/*-------------------------------------------------------------------------*/
+
+static inline void usbhs_write(void __iomem *base, u32 reg, u32 val)
+{
+ __raw_writel(val, base + reg);
+}
+
+static inline u32 usbhs_read(void __iomem *base, u32 reg)
+{
+ return __raw_readl(base + reg);
+}
+
+static inline void usbhs_writeb(void __iomem *base, u8 reg, u8 val)
+{
+ __raw_writeb(val, base + reg);
+}
+
+static inline u8 usbhs_readb(void __iomem *base, u8 reg)
+{
+ return __raw_readb(base + reg);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct platform_device *omap_usbhs_alloc_child(const char *name,
+ struct resource *res, int num_resources, void *pdata,
+ size_t pdata_size, struct device *dev)
+{
+ struct platform_device *child;
+ int ret;
+
+ child = platform_device_alloc(name, 0);
+
+ if (!child) {
+ dev_err(dev, "platform_device_alloc %s failed\n", name);
+ goto err_end;
+ }
+
+ ret = platform_device_add_resources(child, res, num_resources);
+ if (ret) {
+ dev_err(dev, "platform_device_add_resources failed\n");
+ goto err_alloc;
+ }
+
+ ret = platform_device_add_data(child, pdata, pdata_size);
+ if (ret) {
+ dev_err(dev, "platform_device_add_data failed\n");
+ goto err_alloc;
+ }
+
+ child->dev.dma_mask = &usbhs_dmamask;
+ child->dev.coherent_dma_mask = 0xffffffff;
+ child->dev.parent = dev;
+
+ ret = platform_device_add(child);
+ if (ret) {
+ dev_err(dev, "platform_device_add failed\n");
+ goto err_alloc;
+ }
+
+ return child;
+
+err_alloc:
+ platform_device_put(child);
+
+err_end:
+ return NULL;
+}
+
+static int omap_usbhs_alloc_children(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usbhs_hcd_omap *omap;
+ struct ehci_hcd_omap_platform_data *ehci_data;
+ struct ohci_hcd_omap_platform_data *ohci_data;
+ struct platform_device *ehci;
+ struct platform_device *ohci;
+ struct resource *res;
+ struct resource resources[2];
+ int ret;
+
+ omap = platform_get_drvdata(pdev);
+ ehci_data = omap->platdata.ehci_data;
+ ohci_data = omap->platdata.ohci_data;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ehci");
+ if (!res) {
+ dev_err(dev, "EHCI get resource IORESOURCE_MEM failed\n");
+ ret = -ENODEV;
+ goto err_end;
+ }
+ resources[0] = *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ehci-irq");
+ if (!res) {
+ dev_err(dev, "EHCI get resource IORESOURCE_IRQ failed\n");
+ ret = -ENODEV;
+ goto err_end;
+ }
+ resources[1] = *res;
+
+ ehci = omap_usbhs_alloc_child(OMAP_EHCI_DEVICE, resources, 2, ehci_data,
+ sizeof(*ehci_data), dev);
+
+ if (!ehci) {
+ dev_err(dev, "omap_usbhs_alloc_child failed\n");
+ ret = -ENOMEM;
+ goto err_end;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ohci");
+ if (!res) {
+ dev_err(dev, "OHCI get resource IORESOURCE_MEM failed\n");
+ ret = -ENODEV;
+ goto err_ehci;
+ }
+ resources[0] = *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ohci-irq");
+ if (!res) {
+ dev_err(dev, "OHCI get resource IORESOURCE_IRQ failed\n");
+ ret = -ENODEV;
+ goto err_ehci;
+ }
+ resources[1] = *res;
+
+ ohci = omap_usbhs_alloc_child(OMAP_OHCI_DEVICE, resources, 2, ohci_data,
+ sizeof(*ohci_data), dev);
+ if (!ohci) {
+ dev_err(dev, "omap_usbhs_alloc_child failed\n");
+ ret = -ENOMEM;
+ goto err_ehci;
+ }
+
+ return 0;
+
+err_ehci:
+ platform_device_put(ehci);
+
+err_end:
+ return ret;
+}
+
+/**
+ * usbhs_omap_probe - initialize TI-based HCDs
+ *
+ * Allocates basic resources for this USB host controller.
+ */
+static int __devinit usbhs_omap_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usbhs_omap_platform_data *pdata = dev->platform_data;
+ struct usbhs_hcd_omap *omap;
+ struct resource *res;
+ int ret = 0;
+ int i;
+
+ if (!pdata) {
+ dev_err(dev, "Missing platfrom data\n");
+ ret = -ENOMEM;
+ goto end_probe;
+ }
+
+ omap = kzalloc(sizeof(*omap), GFP_KERNEL);
+ if (!omap) {
+ dev_err(dev, "Memory allocation failed\n");
+ ret = -ENOMEM;
+ goto end_probe;
+ }
+
+ spin_lock_init(&omap->lock);
+
+ for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
+ omap->platdata.port_mode[i] = pdata->port_mode[i];
+
+ omap->platdata.ehci_data = pdata->ehci_data;
+ omap->platdata.ohci_data = pdata->ohci_data;
+
+ omap->usbhost_ick = clk_get(dev, "usbhost_ick");
+ if (IS_ERR(omap->usbhost_ick)) {
+ ret = PTR_ERR(omap->usbhost_ick);
+ dev_err(dev, "usbhost_ick failed error:%d\n", ret);
+ goto err_end;
+ }
+
+ omap->usbhost_hs_fck = clk_get(dev, "hs_fck");
+ if (IS_ERR(omap->usbhost_hs_fck)) {
+ ret = PTR_ERR(omap->usbhost_hs_fck);
+ dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret);
+ goto err_usbhost_ick;
+ }
+
+ omap->usbhost_fs_fck = clk_get(dev, "fs_fck");
+ if (IS_ERR(omap->usbhost_fs_fck)) {
+ ret = PTR_ERR(omap->usbhost_fs_fck);
+ dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret);
+ goto err_usbhost_hs_fck;
+ }
+
+ omap->usbtll_fck = clk_get(dev, "usbtll_fck");
+ if (IS_ERR(omap->usbtll_fck)) {
+ ret = PTR_ERR(omap->usbtll_fck);
+ dev_err(dev, "usbtll_fck failed error:%d\n", ret);
+ goto err_usbhost_fs_fck;
+ }
+
+ omap->usbtll_ick = clk_get(dev, "usbtll_ick");
+ if (IS_ERR(omap->usbtll_ick)) {
+ ret = PTR_ERR(omap->usbtll_ick);
+ dev_err(dev, "usbtll_ick failed error:%d\n", ret);
+ goto err_usbtll_fck;
+ }
+
+ omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
+ if (IS_ERR(omap->utmi_p1_fck)) {
+ ret = PTR_ERR(omap->utmi_p1_fck);
+ dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
+ goto err_usbtll_ick;
+ }
+
+ omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
+ if (IS_ERR(omap->xclk60mhsp1_ck)) {
+ ret = PTR_ERR(omap->xclk60mhsp1_ck);
+ dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
+ goto err_utmi_p1_fck;
+ }
+
+ omap->utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
+ if (IS_ERR(omap->utmi_p2_fck)) {
+ ret = PTR_ERR(omap->utmi_p2_fck);
+ dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
+ goto err_xclk60mhsp1_ck;
+ }
+
+ omap->xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
+ if (IS_ERR(omap->xclk60mhsp2_ck)) {
+ ret = PTR_ERR(omap->xclk60mhsp2_ck);
+ dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
+ goto err_utmi_p2_fck;
+ }
+
+ omap->usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
+ if (IS_ERR(omap->usbhost_p1_fck)) {
+ ret = PTR_ERR(omap->usbhost_p1_fck);
+ dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
+ goto err_xclk60mhsp2_ck;
+ }
+
+ omap->usbtll_p1_fck = clk_get(dev, "usb_tll_hs_usb_ch0_clk");
+ if (IS_ERR(omap->usbtll_p1_fck)) {
+ ret = PTR_ERR(omap->usbtll_p1_fck);
+ dev_err(dev, "usbtll_p1_fck failed error:%d\n", ret);
+ goto err_usbhost_p1_fck;
+ }
+
+ omap->usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
+ if (IS_ERR(omap->usbhost_p2_fck)) {
+ ret = PTR_ERR(omap->usbhost_p2_fck);
+ dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
+ goto err_usbtll_p1_fck;
+ }
+
+ omap->usbtll_p2_fck = clk_get(dev, "usb_tll_hs_usb_ch1_clk");
+ if (IS_ERR(omap->usbtll_p2_fck)) {
+ ret = PTR_ERR(omap->usbtll_p2_fck);
+ dev_err(dev, "usbtll_p2_fck failed error:%d\n", ret);
+ goto err_usbhost_p2_fck;
+ }
+
+ omap->init_60m_fclk = clk_get(dev, "init_60m_fclk");
+ if (IS_ERR(omap->init_60m_fclk)) {
+ ret = PTR_ERR(omap->init_60m_fclk);
+ dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
+ goto err_usbtll_p2_fck;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
+ if (!res) {
+ dev_err(dev, "UHH EHCI get resource failed\n");
+ ret = -ENODEV;
+ goto err_init_60m_fclk;
+ }
+
+ omap->uhh_base = ioremap(res->start, resource_size(res));
+ if (!omap->uhh_base) {
+ dev_err(dev, "UHH ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_init_60m_fclk;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tll");
+ if (!res) {
+ dev_err(dev, "UHH EHCI get resource failed\n");
+ ret = -ENODEV;
+ goto err_tll;
+ }
+
+ omap->tll_base = ioremap(res->start, resource_size(res));
+ if (!omap->tll_base) {
+ dev_err(dev, "TLL ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_tll;
+ }
+
+ platform_set_drvdata(pdev, omap);
+
+ ret = omap_usbhs_alloc_children(pdev);
+ if (ret) {
+ dev_err(dev, "omap_usbhs_alloc_children failed\n");
+ goto err_alloc;
+ }
+
+ goto end_probe;
+
+err_alloc:
+ iounmap(omap->tll_base);
+
+err_tll:
+ iounmap(omap->uhh_base);
+
+err_init_60m_fclk:
+ clk_put(omap->init_60m_fclk);
+
+err_usbtll_p2_fck:
+ clk_put(omap->usbtll_p2_fck);
+
+err_usbhost_p2_fck:
+ clk_put(omap->usbhost_p2_fck);
+
+err_usbtll_p1_fck:
+ clk_put(omap->usbtll_p1_fck);
+
+err_usbhost_p1_fck:
+ clk_put(omap->usbhost_p1_fck);
+
+err_xclk60mhsp2_ck:
+ clk_put(omap->xclk60mhsp2_ck);
+
+err_utmi_p2_fck:
+ clk_put(omap->utmi_p2_fck);
+
+err_xclk60mhsp1_ck:
+ clk_put(omap->xclk60mhsp1_ck);
+
+err_utmi_p1_fck:
+ clk_put(omap->utmi_p1_fck);
+
+err_usbtll_ick:
+ clk_put(omap->usbtll_ick);
+
+err_usbtll_fck:
+ clk_put(omap->usbtll_fck);
+
+err_usbhost_fs_fck:
+ clk_put(omap->usbhost_fs_fck);
+
+err_usbhost_hs_fck:
+ clk_put(omap->usbhost_hs_fck);
+
+err_usbhost_ick:
+ clk_put(omap->usbhost_ick);
+
+err_end:
+ kfree(omap);
+
+end_probe:
+ return ret;
+}
+
+/**
+ * usbhs_omap_remove - shutdown processing for UHH & TLL HCDs
+ * @pdev: USB Host Controller being removed
+ *
+ * Reverses the effect of usbhs_omap_probe().
+ */
+static int __devexit usbhs_omap_remove(struct platform_device *pdev)
+{
+ struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
+
+ if (omap->count != 0) {
+ dev_err(&pdev->dev,
+ "Either EHCI or OHCI is still using usbhs core\n");
+ return -EBUSY;
+ }
+
+ iounmap(omap->tll_base);
+ iounmap(omap->uhh_base);
+ clk_put(omap->init_60m_fclk);
+ clk_put(omap->usbtll_p2_fck);
+ clk_put(omap->usbhost_p2_fck);
+ clk_put(omap->usbtll_p1_fck);
+ clk_put(omap->usbhost_p1_fck);
+ clk_put(omap->xclk60mhsp2_ck);
+ clk_put(omap->utmi_p2_fck);
+ clk_put(omap->xclk60mhsp1_ck);
+ clk_put(omap->utmi_p1_fck);
+ clk_put(omap->usbtll_ick);
+ clk_put(omap->usbtll_fck);
+ clk_put(omap->usbhost_fs_fck);
+ clk_put(omap->usbhost_hs_fck);
+ clk_put(omap->usbhost_ick);
+ kfree(omap);
+
+ return 0;
+}
+
+static bool is_ohci_port(enum usbhs_omap_port_mode pmode)
+{
+ switch (pmode) {
+ case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
+ case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
+ case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
+ case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
+ case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/*
+ * convert the port-mode enum to a value we can use in the FSLSMODE
+ * field of USBTLL_CHANNEL_CONF
+ */
+static unsigned ohci_omap3_fslsmode(enum usbhs_omap_port_mode mode)
+{
+ switch (mode) {
+ case OMAP_USBHS_PORT_MODE_UNUSED:
+ case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
+ return OMAP_TLL_FSLSMODE_6PIN_PHY_DAT_SE0;
+
+ case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
+ return OMAP_TLL_FSLSMODE_6PIN_PHY_DP_DM;
+
+ case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
+ return OMAP_TLL_FSLSMODE_3PIN_PHY;
+
+ case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
+ return OMAP_TLL_FSLSMODE_4PIN_PHY;
+
+ case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
+ return OMAP_TLL_FSLSMODE_6PIN_TLL_DAT_SE0;
+
+ case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
+ return OMAP_TLL_FSLSMODE_6PIN_TLL_DP_DM;
+
+ case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
+ return OMAP_TLL_FSLSMODE_3PIN_TLL;
+
+ case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
+ return OMAP_TLL_FSLSMODE_4PIN_TLL;
+
+ case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
+ return OMAP_TLL_FSLSMODE_2PIN_TLL_DAT_SE0;
+
+ case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
+ return OMAP_TLL_FSLSMODE_2PIN_DAT_DP_DM;
+ default:
+ pr_warning("Invalid port mode, using default\n");
+ return OMAP_TLL_FSLSMODE_6PIN_PHY_DAT_SE0;
+ }
+}
+
+static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = dev->platform_data;
+ unsigned reg;
+ int i;
+
+ /* Program Common TLL register */
+ reg = usbhs_read(omap->tll_base, OMAP_TLL_SHARED_CONF);
+ reg |= (OMAP_TLL_SHARED_CONF_FCLK_IS_ON
+ | OMAP_TLL_SHARED_CONF_USB_DIVRATION);
+ reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN;
+ reg &= ~OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN;
+
+ usbhs_write(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);
+
+ /* Enable channels now */
+ for (i = 0; i < tll_channel_count; i++) {
+ reg = usbhs_read(omap->tll_base,
+ OMAP_TLL_CHANNEL_CONF(i));
+
+ if (is_ohci_port(pdata->port_mode[i])) {
+ reg |= ohci_omap3_fslsmode(pdata->port_mode[i])
+ << OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT;
+ reg |= OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS;
+ } else if (pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_TLL) {
+
+ /* Disable AutoIdle, BitStuffing and use SDR Mode */
+ reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
+ | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
+ | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
+
+ reg |= (1 << (i + 1));
+ } else
+ continue;
+
+ reg |= OMAP_TLL_CHANNEL_CONF_CHANEN;
+ usbhs_write(omap->tll_base,
+ OMAP_TLL_CHANNEL_CONF(i), reg);
+
+ usbhs_writeb(omap->tll_base,
+ OMAP_TLL_ULPI_SCRATCH_REGISTER(i), 0xbe);
+ }
+}
+
+static int usbhs_enable(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+ unsigned long flags = 0;
+ int ret = 0;
+ unsigned long timeout;
+ unsigned reg;
+
+ dev_dbg(dev, "starting TI HSUSB Controller\n");
+ if (!pdata) {
+ dev_dbg(dev, "missing platform_data\n");
+ ret = -ENODEV;
+ goto end_enable;
+ }
+
+ spin_lock_irqsave(&omap->lock, flags);
+ if (omap->count > 0)
+ goto end_count;
+
+ clk_enable(omap->usbhost_ick);
+ clk_enable(omap->usbhost_hs_fck);
+ clk_enable(omap->usbhost_fs_fck);
+ clk_enable(omap->usbtll_fck);
+ clk_enable(omap->usbtll_ick);
+
+ if (pdata->ehci_data->phy_reset) {
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
+ gpio_request(pdata->ehci_data->reset_gpio_port[0],
+ "USB1 PHY reset");
+ gpio_direction_output
+ (pdata->ehci_data->reset_gpio_port[0], 0);
+ }
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) {
+ gpio_request(pdata->ehci_data->reset_gpio_port[1],
+ "USB2 PHY reset");
+ gpio_direction_output
+ (pdata->ehci_data->reset_gpio_port[1], 0);
+ }
+
+ /* Hold the PHY in RESET for enough time till DIR is high */
+ udelay(10);
+ }
+
+ omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
+ dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
+
+ /* perform TLL soft reset, and wait until reset is complete */
+ usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+ OMAP_USBTLL_SYSCONFIG_SOFTRESET);
+
+ /* Wait for TLL reset to complete */
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+ & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(dev, "operation timed out\n");
+ ret = -EINVAL;
+ goto err_tll;
+ }
+ }
+
+ dev_dbg(dev, "TLL RESET DONE\n");
+
+ /* (1<<3) = no idle mode only for initial debugging */
+ usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+ OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
+ OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
+ OMAP_USBTLL_SYSCONFIG_AUTOIDLE);
+
+ /* Put UHH in NoIdle/NoStandby mode */
+ reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
+ if (is_omap_usbhs_rev1(omap)) {
+ reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
+ | OMAP_UHH_SYSCONFIG_SIDLEMODE
+ | OMAP_UHH_SYSCONFIG_CACTIVITY
+ | OMAP_UHH_SYSCONFIG_MIDLEMODE);
+ reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+
+ } else if (is_omap_usbhs_rev2(omap)) {
+ reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
+ reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
+ reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
+ reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
+ }
+
+ usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+
+ reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
+ /* setup ULPI bypass and burst configurations */
+ reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
+ | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
+ | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
+ reg |= OMAP4_UHH_HOSTCONFIG_APP_START_CLK;
+ reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
+
+ if (is_omap_usbhs_rev1(omap)) {
+ if (pdata->port_mode[0] == OMAP_USBHS_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
+ if (pdata->port_mode[1] == OMAP_USBHS_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
+ if (pdata->port_mode[2] == OMAP_USBHS_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
+
+ /* Bypass the TLL module for PHY mode operation */
+ if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
+ dev_dbg(dev, "OMAP3 ES version <= ES2.1\n");
+ if (is_ehci_phy_mode(pdata->port_mode[0]) ||
+ is_ehci_phy_mode(pdata->port_mode[1]) ||
+ is_ehci_phy_mode(pdata->port_mode[2]))
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+ else
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+ } else {
+ dev_dbg(dev, "OMAP3 ES version > ES2.1\n");
+ if (is_ehci_phy_mode(pdata->port_mode[0]))
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+ else
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+ if (is_ehci_phy_mode(pdata->port_mode[1]))
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
+ else
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
+ if (is_ehci_phy_mode(pdata->port_mode[2]))
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
+ else
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
+ }
+ } else if (is_omap_usbhs_rev2(omap)) {
+ /* Clear port mode fields for PHY mode*/
+ reg &= ~OMAP4_P1_MODE_CLEAR;
+ reg &= ~OMAP4_P2_MODE_CLEAR;
+
+ if (is_ehci_phy_mode(pdata->port_mode[0])) {
+ ret = clk_set_parent(omap->utmi_p1_fck,
+ omap->xclk60mhsp1_ck);
+ if (ret != 0) {
+ dev_err(dev, "xclk60mhsp1_ck set parent"
+ "failed error:%d\n", ret);
+ goto err_tll;
+ }
+ } else if (is_ehci_tll_mode(pdata->port_mode[0])) {
+ ret = clk_set_parent(omap->utmi_p1_fck,
+ omap->init_60m_fclk);
+ if (ret != 0) {
+ dev_err(dev, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+ goto err_tll;
+ }
+ clk_enable(omap->usbhost_p1_fck);
+ clk_enable(omap->usbtll_p1_fck);
+ }
+
+ if (is_ehci_phy_mode(pdata->port_mode[1])) {
+ ret = clk_set_parent(omap->utmi_p2_fck,
+ omap->xclk60mhsp2_ck);
+ if (ret != 0) {
+ dev_err(dev, "xclk60mhsp1_ck set parent"
+ "failed error:%d\n", ret);
+ goto err_tll;
+ }
+ } else if (is_ehci_tll_mode(pdata->port_mode[1])) {
+ ret = clk_set_parent(omap->utmi_p2_fck,
+ omap->init_60m_fclk);
+ if (ret != 0) {
+ dev_err(dev, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+ goto err_tll;
+ }
+ clk_enable(omap->usbhost_p2_fck);
+ clk_enable(omap->usbtll_p2_fck);
+ }
+
+ clk_enable(omap->utmi_p1_fck);
+ clk_enable(omap->utmi_p2_fck);
+
+ if (is_ehci_tll_mode(pdata->port_mode[0]) ||
+ (is_ohci_port(pdata->port_mode[0])))
+ reg |= OMAP4_P1_MODE_TLL;
+ else if (is_ehci_hsic_mode(pdata->port_mode[0]))
+ reg |= OMAP4_P1_MODE_HSIC;
+
+ if (is_ehci_tll_mode(pdata->port_mode[1]) ||
+ (is_ohci_port(pdata->port_mode[1])))
+ reg |= OMAP4_P2_MODE_TLL;
+ else if (is_ehci_hsic_mode(pdata->port_mode[1]))
+ reg |= OMAP4_P2_MODE_HSIC;
+ }
+
+ usbhs_write(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
+ dev_dbg(dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
+
+ if (is_ehci_tll_mode(pdata->port_mode[0]) ||
+ is_ehci_tll_mode(pdata->port_mode[1]) ||
+ is_ehci_tll_mode(pdata->port_mode[2]) ||
+ (is_ohci_port(pdata->port_mode[0])) ||
+ (is_ohci_port(pdata->port_mode[1])) ||
+ (is_ohci_port(pdata->port_mode[2]))) {
+
+ /* Enable UTMI mode for required TLL channels */
+ if (is_omap_usbhs_rev2(omap))
+ usbhs_omap_tll_init(dev, OMAP_REV2_TLL_CHANNEL_COUNT);
+ else
+ usbhs_omap_tll_init(dev, OMAP_TLL_CHANNEL_COUNT);
+ }
+
+ if (pdata->ehci_data->phy_reset) {
+ /* Hold the PHY in RESET for enough time till
+ * PHY is settled and ready
+ */
+ udelay(10);
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_set_value
+ (pdata->ehci_data->reset_gpio_port[0], 1);
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_set_value
+ (pdata->ehci_data->reset_gpio_port[1], 1);
+ }
+
+end_count:
+ omap->count++;
+ goto end_enable;
+
+err_tll:
+ if (pdata->ehci_data->phy_reset) {
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+ }
+
+ clk_disable(omap->usbtll_ick);
+ clk_disable(omap->usbtll_fck);
+ clk_disable(omap->usbhost_fs_fck);
+ clk_disable(omap->usbhost_hs_fck);
+ clk_disable(omap->usbhost_ick);
+
+end_enable:
+ spin_unlock_irqrestore(&omap->lock, flags);
+ return ret;
+}
+
+static void usbhs_disable(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+ unsigned long flags = 0;
+ unsigned long timeout;
+
+ dev_dbg(dev, "stopping TI HSUSB Controller\n");
+
+ spin_lock_irqsave(&omap->lock, flags);
+
+ if (omap->count == 0)
+ goto end_disable;
+
+ omap->count--;
+
+ if (omap->count != 0)
+ goto end_disable;
+
+ /* Reset OMAP modules for insmod/rmmod to work */
+ usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG,
+ is_omap_usbhs_rev2(omap) ?
+ OMAP4_UHH_SYSCONFIG_SOFTRESET :
+ OMAP_UHH_SYSCONFIG_SOFTRESET);
+
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+ & (1 << 0))) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(dev, "operation timed out\n");
+ }
+
+ while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+ & (1 << 1))) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(dev, "operation timed out\n");
+ }
+
+ while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+ & (1 << 2))) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(dev, "operation timed out\n");
+ }
+
+ usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
+
+ while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+ & (1 << 0))) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(dev, "operation timed out\n");
+ }
+
+ if (pdata->ehci_data->phy_reset) {
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+ }
+
+ clk_disable(omap->utmi_p2_fck);
+ clk_disable(omap->utmi_p1_fck);
+ clk_disable(omap->usbtll_ick);
+ clk_disable(omap->usbtll_fck);
+ clk_disable(omap->usbhost_fs_fck);
+ clk_disable(omap->usbhost_hs_fck);
+ clk_disable(omap->usbhost_ick);
+
+end_disable:
+ spin_unlock_irqrestore(&omap->lock, flags);
+}
+
+int omap_usbhs_enable(struct device *dev)
+{
+ return usbhs_enable(dev->parent);
+}
+EXPORT_SYMBOL_GPL(omap_usbhs_enable);
+
+void omap_usbhs_disable(struct device *dev)
+{
+ usbhs_disable(dev->parent);
+}
+EXPORT_SYMBOL_GPL(omap_usbhs_disable);
+
+static struct platform_driver usbhs_omap_driver = {
+ .driver = {
+ .name = (char *)usbhs_driver_name,
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(usbhs_omap_remove),
+};
+
+MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>");
+MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
+
+static int __init omap_usbhs_drvinit(void)
+{
+ return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
+}
+
+/*
+ * Initialize before the EHCI and OHCI drivers:
+ * the usbhs core driver must be ready before
+ * the omap ehci and ohci probe functions are called.
+ */
+fs_initcall(omap_usbhs_drvinit);
+
+static void __exit omap_usbhs_drvexit(void)
+{
+ platform_driver_unregister(&usbhs_omap_driver);
+}
+module_exit(omap_usbhs_drvexit);
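Usage sketch (not part of this patch; function names are hypothetical): the ehci-omap and ohci-omap3 children created above are expected to power the shared UHH/TLL block through the exported pair, roughly:

#include <linux/platform_device.h>
#include <plat/usb.h>

static int ehci_omap_child_probe(struct platform_device *pdev)
{
	int ret;

	/* parent usbhs core enables clocks and programs UHH/TLL */
	ret = omap_usbhs_enable(&pdev->dev);
	if (ret)
		return ret;

	/* ... create and add the HCD ... */
	return 0;
}

static int ehci_omap_child_remove(struct platform_device *pdev)
{
	/* ... remove the HCD ... */
	omap_usbhs_disable(&pdev->dev);
	return 0;
}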
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 501ce13b693..c1306ed43e3 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -21,6 +21,7 @@
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/mfd/pcf50633/core.h>
@@ -230,27 +231,26 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
}
}
-#ifdef CONFIG_PM
-static int pcf50633_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int pcf50633_suspend(struct device *dev)
{
- struct pcf50633 *pcf;
- pcf = i2c_get_clientdata(client);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pcf50633 *pcf = i2c_get_clientdata(client);
return pcf50633_irq_suspend(pcf);
}
-static int pcf50633_resume(struct i2c_client *client)
+static int pcf50633_resume(struct device *dev)
{
- struct pcf50633 *pcf;
- pcf = i2c_get_clientdata(client);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pcf50633 *pcf = i2c_get_clientdata(client);
return pcf50633_irq_resume(pcf);
}
-#else
-#define pcf50633_suspend NULL
-#define pcf50633_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(pcf50633_pm, pcf50633_suspend, pcf50633_resume);
+
static int __devinit pcf50633_probe(struct i2c_client *client,
const struct i2c_device_id *ids)
{
@@ -360,16 +360,16 @@ static struct i2c_device_id pcf50633_id_table[] = {
{"pcf50633", 0x73},
{/* end of list */}
};
+MODULE_DEVICE_TABLE(i2c, pcf50633_id_table);
static struct i2c_driver pcf50633_driver = {
.driver = {
.name = "pcf50633",
+ .pm = &pcf50633_pm,
},
.id_table = pcf50633_id_table,
.probe = pcf50633_probe,
.remove = __devexit_p(pcf50633_remove),
- .suspend = pcf50633_suspend,
- .resume = pcf50633_resume,
};
static int __init pcf50633_init(void)
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index 50922975bda..193c940225b 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -61,12 +61,12 @@ static struct mfd_cell rdc321x_sb_cells[] = {
.name = "rdc321x-wdt",
.resources = rdc321x_wdt_resource,
.num_resources = ARRAY_SIZE(rdc321x_wdt_resource),
- .driver_data = &rdc321x_wdt_pdata,
+ .mfd_data = &rdc321x_wdt_pdata,
}, {
.name = "rdc321x-gpio",
.resources = rdc321x_gpio_resources,
.num_resources = ARRAY_SIZE(rdc321x_gpio_resources),
- .driver_data = &rdc321x_gpio_pdata,
+ .mfd_data = &rdc321x_gpio_pdata,
},
};
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
index 0a7df44a93c..53a63024bf1 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -146,9 +146,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
}
memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc));
- priv->cell_mmc.driver_data = mmc_data;
- priv->cell_mmc.platform_data = &priv->cell_mmc;
- priv->cell_mmc.data_size = sizeof(priv->cell_mmc);
+ priv->cell_mmc.mfd_data = mmc_data;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 9caeb4ac6ea..af57fc706a4 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -170,7 +170,7 @@ static struct mfd_cell t7l66xb_cells[] = {
.name = "tmio-mmc",
.enable = t7l66xb_mmc_enable,
.disable = t7l66xb_mmc_disable,
- .driver_data = &t7166xb_mmc_data,
+ .mfd_data = &t7166xb_mmc_data,
.num_resources = ARRAY_SIZE(t7l66xb_mmc_resources),
.resources = t7l66xb_mmc_resources,
},
@@ -383,16 +383,7 @@ static int t7l66xb_probe(struct platform_device *dev)
t7l66xb_attach_irq(dev);
- t7l66xb_cells[T7L66XB_CELL_NAND].driver_data = pdata->nand_data;
- t7l66xb_cells[T7L66XB_CELL_NAND].platform_data =
- &t7l66xb_cells[T7L66XB_CELL_NAND];
- t7l66xb_cells[T7L66XB_CELL_NAND].data_size =
- sizeof(t7l66xb_cells[T7L66XB_CELL_NAND]);
-
- t7l66xb_cells[T7L66XB_CELL_MMC].platform_data =
- &t7l66xb_cells[T7L66XB_CELL_MMC];
- t7l66xb_cells[T7L66XB_CELL_MMC].data_size =
- sizeof(t7l66xb_cells[T7L66XB_CELL_MMC]);
+ t7l66xb_cells[T7L66XB_CELL_NAND].mfd_data = pdata->nand_data;
ret = mfd_add_devices(&dev->dev, dev->id,
t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells),
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 6315f63f017..b006f7cee95 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -131,7 +131,7 @@ static struct mfd_cell tc6387xb_cells[] = {
.name = "tmio-mmc",
.enable = tc6387xb_mmc_enable,
.disable = tc6387xb_mmc_disable,
- .driver_data = &tc6387xb_mmc_data,
+ .mfd_data = &tc6387xb_mmc_data,
.num_resources = ARRAY_SIZE(tc6387xb_mmc_resources),
.resources = tc6387xb_mmc_resources,
},
@@ -190,11 +190,6 @@ static int __devinit tc6387xb_probe(struct platform_device *dev)
printk(KERN_INFO "Toshiba tc6387xb initialised\n");
- tc6387xb_cells[TC6387XB_CELL_MMC].platform_data =
- &tc6387xb_cells[TC6387XB_CELL_MMC];
- tc6387xb_cells[TC6387XB_CELL_MMC].data_size =
- sizeof(tc6387xb_cells[TC6387XB_CELL_MMC]);
-
ret = mfd_add_devices(&dev->dev, dev->id, tc6387xb_cells,
ARRAY_SIZE(tc6387xb_cells), iomem, irq);
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 9a238633a54..3d62ded86a8 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -393,7 +393,7 @@ static struct mfd_cell __devinitdata tc6393xb_cells[] = {
.name = "tmio-mmc",
.enable = tc6393xb_mmc_enable,
.resume = tc6393xb_mmc_resume,
- .driver_data = &tc6393xb_mmc_data,
+ .mfd_data = &tc6393xb_mmc_data,
.num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
.resources = tc6393xb_mmc_resources,
},
@@ -693,27 +693,8 @@ static int __devinit tc6393xb_probe(struct platform_device *dev)
goto err_setup;
}
- tc6393xb_cells[TC6393XB_CELL_NAND].driver_data = tcpd->nand_data;
- tc6393xb_cells[TC6393XB_CELL_NAND].platform_data =
- &tc6393xb_cells[TC6393XB_CELL_NAND];
- tc6393xb_cells[TC6393XB_CELL_NAND].data_size =
- sizeof(tc6393xb_cells[TC6393XB_CELL_NAND]);
-
- tc6393xb_cells[TC6393XB_CELL_MMC].platform_data =
- &tc6393xb_cells[TC6393XB_CELL_MMC];
- tc6393xb_cells[TC6393XB_CELL_MMC].data_size =
- sizeof(tc6393xb_cells[TC6393XB_CELL_MMC]);
-
- tc6393xb_cells[TC6393XB_CELL_OHCI].platform_data =
- &tc6393xb_cells[TC6393XB_CELL_OHCI];
- tc6393xb_cells[TC6393XB_CELL_OHCI].data_size =
- sizeof(tc6393xb_cells[TC6393XB_CELL_OHCI]);
-
- tc6393xb_cells[TC6393XB_CELL_FB].driver_data = tcpd->fb_data;
- tc6393xb_cells[TC6393XB_CELL_FB].platform_data =
- &tc6393xb_cells[TC6393XB_CELL_FB];
- tc6393xb_cells[TC6393XB_CELL_FB].data_size =
- sizeof(tc6393xb_cells[TC6393XB_CELL_FB]);
+ tc6393xb_cells[TC6393XB_CELL_NAND].mfd_data = tcpd->nand_data;
+ tc6393xb_cells[TC6393XB_CELL_FB].mfd_data = tcpd->fb_data;
ret = mfd_add_devices(&dev->dev, dev->id,
tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c
new file mode 100644
index 00000000000..af9ab0e5ca6
--- /dev/null
+++ b/drivers/mfd/ti-ssp.c
@@ -0,0 +1,476 @@
+/*
+ * Sequencer Serial Port (SSP) driver for Texas Instruments' SoCs
+ *
+ * Copyright (C) 2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/ti_ssp.h>
+
+/* Register Offsets */
+#define REG_REV 0x00
+#define REG_IOSEL_1 0x04
+#define REG_IOSEL_2 0x08
+#define REG_PREDIV 0x0c
+#define REG_INTR_ST 0x10
+#define REG_INTR_EN 0x14
+#define REG_TEST_CTRL 0x18
+
+/* Per port registers */
+#define PORT_CFG_2 0x00
+#define PORT_ADDR 0x04
+#define PORT_DATA 0x08
+#define PORT_CFG_1 0x0c
+#define PORT_STATE 0x10
+
+#define SSP_PORT_CONFIG_MASK (SSP_EARLY_DIN | SSP_DELAY_DOUT)
+#define SSP_PORT_CLKRATE_MASK 0x0f
+
+#define SSP_SEQRAM_WR_EN BIT(4)
+#define SSP_SEQRAM_RD_EN BIT(5)
+#define SSP_START BIT(15)
+#define SSP_BUSY BIT(10)
+#define SSP_PORT_ASL BIT(7)
+#define SSP_PORT_CFO1 BIT(6)
+
+#define SSP_PORT_SEQRAM_SIZE 32
+
+static const int ssp_port_base[] = {0x040, 0x080};
+static const int ssp_port_seqram[] = {0x100, 0x180};
+
+struct ti_ssp {
+ struct resource *res;
+ struct device *dev;
+ void __iomem *regs;
+ spinlock_t lock;
+ struct clk *clk;
+ int irq;
+ wait_queue_head_t wqh;
+
+ /*
+ * Some of the iosel2 register bits always read-back as 0, we need to
+ * remember these values so that we don't clobber previously set
+ * values.
+ */
+ u32 iosel2;
+};
+
+static inline struct ti_ssp *dev_to_ssp(struct device *dev)
+{
+ return dev_get_drvdata(dev->parent);
+}
+
+static inline int dev_to_port(struct device *dev)
+{
+ return to_platform_device(dev)->id;
+}
+
+/* Register Access Helpers, rmw() functions need to run locked */
+static inline u32 ssp_read(struct ti_ssp *ssp, int reg)
+{
+ return __raw_readl(ssp->regs + reg);
+}
+
+static inline void ssp_write(struct ti_ssp *ssp, int reg, u32 val)
+{
+ __raw_writel(val, ssp->regs + reg);
+}
+
+static inline void ssp_rmw(struct ti_ssp *ssp, int reg, u32 mask, u32 bits)
+{
+ ssp_write(ssp, reg, (ssp_read(ssp, reg) & ~mask) | bits);
+}
+
+static inline u32 ssp_port_read(struct ti_ssp *ssp, int port, int reg)
+{
+ return ssp_read(ssp, ssp_port_base[port] + reg);
+}
+
+static inline void ssp_port_write(struct ti_ssp *ssp, int port, int reg,
+ u32 val)
+{
+ ssp_write(ssp, ssp_port_base[port] + reg, val);
+}
+
+static inline void ssp_port_rmw(struct ti_ssp *ssp, int port, int reg,
+ u32 mask, u32 bits)
+{
+ ssp_rmw(ssp, ssp_port_base[port] + reg, mask, bits);
+}
+
+static inline void ssp_port_clr_bits(struct ti_ssp *ssp, int port, int reg,
+ u32 bits)
+{
+ ssp_port_rmw(ssp, port, reg, bits, 0);
+}
+
+static inline void ssp_port_set_bits(struct ti_ssp *ssp, int port, int reg,
+ u32 bits)
+{
+ ssp_port_rmw(ssp, port, reg, 0, bits);
+}
+
+/* Called to setup port clock mode, caller must hold ssp->lock */
+static int __set_mode(struct ti_ssp *ssp, int port, int mode)
+{
+ mode &= SSP_PORT_CONFIG_MASK;
+ ssp_port_rmw(ssp, port, PORT_CFG_1, SSP_PORT_CONFIG_MASK, mode);
+
+ return 0;
+}
+
+int ti_ssp_set_mode(struct device *dev, int mode)
+{
+ struct ti_ssp *ssp = dev_to_ssp(dev);
+ int port = dev_to_port(dev);
+ int ret;
+
+ spin_lock(&ssp->lock);
+ ret = __set_mode(ssp, port, mode);
+ spin_unlock(&ssp->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ti_ssp_set_mode);
+
+/* Called to setup iosel2, caller must hold ssp->lock */
+static void __set_iosel2(struct ti_ssp *ssp, u32 mask, u32 val)
+{
+ ssp->iosel2 = (ssp->iosel2 & ~mask) | val;
+ ssp_write(ssp, REG_IOSEL_2, ssp->iosel2);
+}
+
+/* Called to setup port iosel, caller must hold ssp->lock */
+static void __set_iosel(struct ti_ssp *ssp, int port, u32 iosel)
+{
+ unsigned val, shift = port ? 16 : 0;
+
+ /* IOSEL1 gets the least significant 16 bits */
+ val = ssp_read(ssp, REG_IOSEL_1);
+ val &= 0xffff << (port ? 0 : 16);
+ val |= (iosel & 0xffff) << (port ? 16 : 0);
+ ssp_write(ssp, REG_IOSEL_1, val);
+
+ /* IOSEL2 gets the most significant 16 bits */
+ val = (iosel >> 16) & 0x7;
+ __set_iosel2(ssp, 0x7 << shift, val << shift);
+}
+
+int ti_ssp_set_iosel(struct device *dev, u32 iosel)
+{
+ struct ti_ssp *ssp = dev_to_ssp(dev);
+ int port = dev_to_port(dev);
+
+ spin_lock(&ssp->lock);
+ __set_iosel(ssp, port, iosel);
+ spin_unlock(&ssp->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ti_ssp_set_iosel);
+
+int ti_ssp_load(struct device *dev, int offs, u32 *prog, int len)
+{
+ struct ti_ssp *ssp = dev_to_ssp(dev);
+ int port = dev_to_port(dev);
+ int i;
+
+ if (len > SSP_PORT_SEQRAM_SIZE)
+ return -ENOSPC;
+
+ spin_lock(&ssp->lock);
+
+ /* Enable SeqRAM access */
+ ssp_port_set_bits(ssp, port, PORT_CFG_2, SSP_SEQRAM_WR_EN);
+
+ /* Copy code */
+ for (i = 0; i < len; i++) {
+ __raw_writel(prog[i], ssp->regs + offs + 4*i +
+ ssp_port_seqram[port]);
+ }
+
+ /* Disable SeqRAM access */
+ ssp_port_clr_bits(ssp, port, PORT_CFG_2, SSP_SEQRAM_WR_EN);
+
+ spin_unlock(&ssp->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ti_ssp_load);
+
+int ti_ssp_raw_read(struct device *dev)
+{
+ struct ti_ssp *ssp = dev_to_ssp(dev);
+ int port = dev_to_port(dev);
+ int shift = port ? 27 : 11;
+
+ return (ssp_read(ssp, REG_IOSEL_2) >> shift) & 0xf;
+}
+EXPORT_SYMBOL(ti_ssp_raw_read);
+
+int ti_ssp_raw_write(struct device *dev, u32 val)
+{
+ struct ti_ssp *ssp = dev_to_ssp(dev);
+ int port = dev_to_port(dev), shift;
+
+ spin_lock(&ssp->lock);
+
+ shift = port ? 22 : 6;
+ val &= 0xf;
+ __set_iosel2(ssp, 0xf << shift, val << shift);
+
+ spin_unlock(&ssp->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ti_ssp_raw_write);
+
+static inline int __xfer_done(struct ti_ssp *ssp, int port)
+{
+ return !(ssp_port_read(ssp, port, PORT_CFG_1) & SSP_BUSY);
+}
+
+int ti_ssp_run(struct device *dev, u32 pc, u32 input, u32 *output)
+{
+ struct ti_ssp *ssp = dev_to_ssp(dev);
+ int port = dev_to_port(dev);
+ int ret;
+
+ if (pc & ~(0x3f))
+ return -EINVAL;
+
+ /* Grab ssp->lock to serialize rmw on ssp registers */
+ spin_lock(&ssp->lock);
+
+ ssp_port_write(ssp, port, PORT_ADDR, input >> 16);
+ ssp_port_write(ssp, port, PORT_DATA, input & 0xffff);
+ ssp_port_rmw(ssp, port, PORT_CFG_1, 0x3f, pc);
+
+ /* grab wait queue head lock to avoid race with the isr */
+ spin_lock_irq(&ssp->wqh.lock);
+
+ /* kick off sequence execution in hardware */
+ ssp_port_set_bits(ssp, port, PORT_CFG_1, SSP_START);
+
+ /* drop ssp lock; no register writes beyond this */
+ spin_unlock(&ssp->lock);
+
+ ret = wait_event_interruptible_locked_irq(ssp->wqh,
+ __xfer_done(ssp, port));
+ spin_unlock_irq(&ssp->wqh.lock);
+
+ if (ret < 0)
+ return ret;
+
+ if (output) {
+ *output = (ssp_port_read(ssp, port, PORT_ADDR) << 16) |
+ (ssp_port_read(ssp, port, PORT_DATA) & 0xffff);
+ }
+
+ ret = ssp_port_read(ssp, port, PORT_STATE) & 0x3f; /* stop address */
+
+ return ret;
+}
+EXPORT_SYMBOL(ti_ssp_run);
+
+static irqreturn_t ti_ssp_interrupt(int irq, void *dev_data)
+{
+ struct ti_ssp *ssp = dev_data;
+
+ spin_lock(&ssp->wqh.lock);
+
+ ssp_write(ssp, REG_INTR_ST, 0x3);
+ wake_up_locked(&ssp->wqh);
+
+ spin_unlock(&ssp->wqh.lock);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit ti_ssp_probe(struct platform_device *pdev)
+{
+ struct ti_ssp *ssp;
+ const struct ti_ssp_data *pdata = pdev->dev.platform_data;
+ int error = 0, prediv = 0xff, id;
+ unsigned long sysclk;
+ struct device *dev = &pdev->dev;
+ struct mfd_cell cells[2];
+
+ ssp = kzalloc(sizeof(*ssp), GFP_KERNEL);
+ if (!ssp) {
+ dev_err(dev, "cannot allocate device info\n");
+ return -ENOMEM;
+ }
+
+ ssp->dev = dev;
+ dev_set_drvdata(dev, ssp);
+
+ ssp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ssp->res) {
+ error = -ENODEV;
+ dev_err(dev, "cannot determine register area\n");
+ goto error_res;
+ }
+
+ if (!request_mem_region(ssp->res->start, resource_size(ssp->res),
+ pdev->name)) {
+ error = -ENOMEM;
+ dev_err(dev, "cannot claim register memory\n");
+ goto error_res;
+ }
+
+ ssp->regs = ioremap(ssp->res->start, resource_size(ssp->res));
+ if (!ssp->regs) {
+ error = -ENOMEM;
+ dev_err(dev, "cannot map register memory\n");
+ goto error_map;
+ }
+
+ ssp->clk = clk_get(dev, NULL);
+ if (IS_ERR(ssp->clk)) {
+ error = PTR_ERR(ssp->clk);
+ dev_err(dev, "cannot claim device clock\n");
+ goto error_clk;
+ }
+
+ ssp->irq = platform_get_irq(pdev, 0);
+ if (ssp->irq < 0) {
+ error = -ENODEV;
+ dev_err(dev, "unknown irq\n");
+ goto error_irq;
+ }
+
+ error = request_threaded_irq(ssp->irq, NULL, ti_ssp_interrupt, 0,
+ dev_name(dev), ssp);
+ if (error < 0) {
+ dev_err(dev, "cannot acquire irq\n");
+ goto error_irq;
+ }
+
+ spin_lock_init(&ssp->lock);
+ init_waitqueue_head(&ssp->wqh);
+
+ /* Power on and initialize SSP */
+ error = clk_enable(ssp->clk);
+ if (error) {
+ dev_err(dev, "cannot enable device clock\n");
+ goto error_enable;
+ }
+
+ /* Reset registers to a sensible known state */
+ ssp_write(ssp, REG_IOSEL_1, 0);
+ ssp_write(ssp, REG_IOSEL_2, 0);
+ ssp_write(ssp, REG_INTR_EN, 0x3);
+ ssp_write(ssp, REG_INTR_ST, 0x3);
+ ssp_write(ssp, REG_TEST_CTRL, 0);
+ ssp_port_write(ssp, 0, PORT_CFG_1, SSP_PORT_ASL);
+ ssp_port_write(ssp, 1, PORT_CFG_1, SSP_PORT_ASL);
+ ssp_port_write(ssp, 0, PORT_CFG_2, SSP_PORT_CFO1);
+ ssp_port_write(ssp, 1, PORT_CFG_2, SSP_PORT_CFO1);
+
+ sysclk = clk_get_rate(ssp->clk);
+ if (pdata && pdata->out_clock)
+ prediv = (sysclk / pdata->out_clock) - 1;
+ prediv = clamp(prediv, 0, 0xff);
+ ssp_rmw(ssp, REG_PREDIV, 0xff, prediv);
+
+ memset(cells, 0, sizeof(cells));
+ for (id = 0; id < 2; id++) {
+ const struct ti_ssp_dev_data *data = &pdata->dev_data[id];
+
+ cells[id].id = id;
+ cells[id].name = data->dev_name;
+ cells[id].platform_data = data->pdata;
+ cells[id].data_size = data->pdata_size;
+ }
+
+ error = mfd_add_devices(dev, 0, cells, 2, NULL, 0);
+ if (error < 0) {
+ dev_err(dev, "cannot add mfd cells\n");
+ goto error_enable;
+ }
+
+ return 0;
+
+error_enable:
+ free_irq(ssp->irq, ssp);
+error_irq:
+ clk_put(ssp->clk);
+error_clk:
+ iounmap(ssp->regs);
+error_map:
+ release_mem_region(ssp->res->start, resource_size(ssp->res));
+error_res:
+ kfree(ssp);
+ return error;
+}
+
+static int __devexit ti_ssp_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ti_ssp *ssp = dev_get_drvdata(dev);
+
+ mfd_remove_devices(dev);
+ clk_disable(ssp->clk);
+ free_irq(ssp->irq, ssp);
+ clk_put(ssp->clk);
+ iounmap(ssp->regs);
+ release_mem_region(ssp->res->start, resource_size(ssp->res));
+ kfree(ssp);
+ dev_set_drvdata(dev, NULL);
+ return 0;
+}
+
+static struct platform_driver ti_ssp_driver = {
+ .probe = ti_ssp_probe,
+ .remove = __devexit_p(ti_ssp_remove),
+ .driver = {
+ .name = "ti-ssp",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init ti_ssp_init(void)
+{
+ return platform_driver_register(&ti_ssp_driver);
+}
+module_init(ti_ssp_init);
+
+static void __exit ti_ssp_exit(void)
+{
+ platform_driver_unregister(&ti_ssp_driver);
+}
+module_exit(ti_ssp_exit);
+
+MODULE_DESCRIPTION("Sequencer Serial Port (SSP) Driver");
+MODULE_AUTHOR("Cyril Chemparathy");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ti-ssp");
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 6ad8a7f8d39..94c6c8afad1 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -384,8 +384,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
- .platform_data = &timb_dma_platform_data,
- .data_size = sizeof(timb_dma_platform_data),
+ .mfd_data = &timb_dma_platform_data,
},
{
.name = "timb-uart",
@@ -396,43 +395,37 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
.name = "xiic-i2c",
.num_resources = ARRAY_SIZE(timberdale_xiic_resources),
.resources = timberdale_xiic_resources,
- .platform_data = &timberdale_xiic_platform_data,
- .data_size = sizeof(timberdale_xiic_platform_data),
+ .mfd_data = &timberdale_xiic_platform_data,
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
- .platform_data = &timberdale_gpio_platform_data,
- .data_size = sizeof(timberdale_gpio_platform_data),
+ .mfd_data = &timberdale_gpio_platform_data,
},
{
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
- .platform_data = &timberdale_video_platform_data,
- .data_size = sizeof(timberdale_video_platform_data),
+ .mfd_data = &timberdale_video_platform_data,
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
- .platform_data = &timberdale_radio_platform_data,
- .data_size = sizeof(timberdale_radio_platform_data),
+ .mfd_data = &timberdale_radio_platform_data,
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
- .platform_data = &timberdale_xspi_platform_data,
- .data_size = sizeof(timberdale_xspi_platform_data),
+ .mfd_data = &timberdale_xspi_platform_data,
},
{
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
- .platform_data = &timberdale_ks8842_platform_data,
- .data_size = sizeof(timberdale_ks8842_platform_data)
+ .mfd_data = &timberdale_ks8842_platform_data,
},
};
@@ -441,8 +434,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
- .platform_data = &timb_dma_platform_data,
- .data_size = sizeof(timb_dma_platform_data),
+ .mfd_data = &timb_dma_platform_data,
},
{
.name = "timb-uart",
@@ -458,15 +450,13 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.name = "xiic-i2c",
.num_resources = ARRAY_SIZE(timberdale_xiic_resources),
.resources = timberdale_xiic_resources,
- .platform_data = &timberdale_xiic_platform_data,
- .data_size = sizeof(timberdale_xiic_platform_data),
+ .mfd_data = &timberdale_xiic_platform_data,
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
- .platform_data = &timberdale_gpio_platform_data,
- .data_size = sizeof(timberdale_gpio_platform_data),
+ .mfd_data = &timberdale_gpio_platform_data,
},
{
.name = "timb-mlogicore",
@@ -477,29 +467,25 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
- .platform_data = &timberdale_video_platform_data,
- .data_size = sizeof(timberdale_video_platform_data),
+ .mfd_data = &timberdale_video_platform_data,
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
- .platform_data = &timberdale_radio_platform_data,
- .data_size = sizeof(timberdale_radio_platform_data),
+ .mfd_data = &timberdale_radio_platform_data,
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
- .platform_data = &timberdale_xspi_platform_data,
- .data_size = sizeof(timberdale_xspi_platform_data),
+ .mfd_data = &timberdale_xspi_platform_data,
},
{
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
- .platform_data = &timberdale_ks8842_platform_data,
- .data_size = sizeof(timberdale_ks8842_platform_data)
+ .mfd_data = &timberdale_ks8842_platform_data,
},
};
@@ -508,8 +494,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
- .platform_data = &timb_dma_platform_data,
- .data_size = sizeof(timb_dma_platform_data),
+ .mfd_data = &timb_dma_platform_data,
},
{
.name = "timb-uart",
@@ -520,36 +505,31 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
.name = "xiic-i2c",
.num_resources = ARRAY_SIZE(timberdale_xiic_resources),
.resources = timberdale_xiic_resources,
- .platform_data = &timberdale_xiic_platform_data,
- .data_size = sizeof(timberdale_xiic_platform_data),
+ .mfd_data = &timberdale_xiic_platform_data,
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
- .platform_data = &timberdale_gpio_platform_data,
- .data_size = sizeof(timberdale_gpio_platform_data),
+ .mfd_data = &timberdale_gpio_platform_data,
},
{
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
- .platform_data = &timberdale_video_platform_data,
- .data_size = sizeof(timberdale_video_platform_data),
+ .mfd_data = &timberdale_video_platform_data,
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
- .platform_data = &timberdale_radio_platform_data,
- .data_size = sizeof(timberdale_radio_platform_data),
+ .mfd_data = &timberdale_radio_platform_data,
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
- .platform_data = &timberdale_xspi_platform_data,
- .data_size = sizeof(timberdale_xspi_platform_data),
+ .mfd_data = &timberdale_xspi_platform_data,
},
};
@@ -558,8 +538,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
- .platform_data = &timb_dma_platform_data,
- .data_size = sizeof(timb_dma_platform_data),
+ .mfd_data = &timb_dma_platform_data,
},
{
.name = "timb-uart",
@@ -570,43 +549,37 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
.name = "ocores-i2c",
.num_resources = ARRAY_SIZE(timberdale_ocores_resources),
.resources = timberdale_ocores_resources,
- .platform_data = &timberdale_ocores_platform_data,
- .data_size = sizeof(timberdale_ocores_platform_data),
+ .mfd_data = &timberdale_ocores_platform_data,
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
- .platform_data = &timberdale_gpio_platform_data,
- .data_size = sizeof(timberdale_gpio_platform_data),
+ .mfd_data = &timberdale_gpio_platform_data,
},
{
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
- .platform_data = &timberdale_video_platform_data,
- .data_size = sizeof(timberdale_video_platform_data),
+ .mfd_data = &timberdale_video_platform_data,
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
- .platform_data = &timberdale_radio_platform_data,
- .data_size = sizeof(timberdale_radio_platform_data),
+ .mfd_data = &timberdale_radio_platform_data,
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
- .platform_data = &timberdale_xspi_platform_data,
- .data_size = sizeof(timberdale_xspi_platform_data),
+ .mfd_data = &timberdale_xspi_platform_data,
},
{
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
- .platform_data = &timberdale_ks8842_platform_data,
- .data_size = sizeof(timberdale_ks8842_platform_data)
+ .mfd_data = &timberdale_ks8842_platform_data,
},
};
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
new file mode 100644
index 00000000000..46d8205646b
--- /dev/null
+++ b/drivers/mfd/tps6105x.c
@@ -0,0 +1,246 @@
+/*
+ * Core driver for TPS61050/61052 boost converters, used for white LED
+ * driving, audio power amplification, white LED flash, and generic
+ * boost conversion. Additionally it provides a 1-bit GPIO pin (out or in)
+ * and a flash synchronization pin to synchronize flash events when used
+ * as a flashgun.
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/regulator/driver.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps6105x.h>
+
+int tps6105x_set(struct tps6105x *tps6105x, u8 reg, u8 value)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&tps6105x->lock);
+ if (ret)
+ return ret;
+ ret = i2c_smbus_write_byte_data(tps6105x->client, reg, value);
+ mutex_unlock(&tps6105x->lock);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(tps6105x_set);
+
+int tps6105x_get(struct tps6105x *tps6105x, u8 reg, u8 *buf)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&tps6105x->lock);
+ if (ret)
+ return ret;
+ ret = i2c_smbus_read_byte_data(tps6105x->client, reg);
+ mutex_unlock(&tps6105x->lock);
+ if (ret < 0)
+ return ret;
+
+ *buf = ret;
+ return 0;
+}
+EXPORT_SYMBOL(tps6105x_get);
+
+/*
+ * Updates the bits selected by bitmask to the corresponding bits in
+ * bitvalues, as a single atomic read-modify-write operation.
+ */
+int tps6105x_mask_and_set(struct tps6105x *tps6105x, u8 reg,
+ u8 bitmask, u8 bitvalues)
+{
+ int ret;
+ u8 regval;
+
+ ret = mutex_lock_interruptible(&tps6105x->lock);
+ if (ret)
+ return ret;
+ ret = i2c_smbus_read_byte_data(tps6105x->client, reg);
+ if (ret < 0)
+ goto fail;
+ regval = ret;
+ regval = (~bitmask & regval) | (bitmask & bitvalues);
+ ret = i2c_smbus_write_byte_data(tps6105x->client, reg, regval);
+fail:
+ mutex_unlock(&tps6105x->lock);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(tps6105x_mask_and_set);
+
+static int __devinit tps6105x_startup(struct tps6105x *tps6105x)
+{
+ int ret;
+ u8 regval;
+
+ ret = tps6105x_get(tps6105x, TPS6105X_REG_0, &regval);
+ if (ret)
+ return ret;
+ switch (regval >> TPS6105X_REG0_MODE_SHIFT) {
+ case TPS6105X_REG0_MODE_SHUTDOWN:
+ dev_info(&tps6105x->client->dev,
+ "TPS6105x found in SHUTDOWN mode\n");
+ break;
+ case TPS6105X_REG0_MODE_TORCH:
+ dev_info(&tps6105x->client->dev,
+ "TPS6105x found in TORCH mode\n");
+ break;
+ case TPS6105X_REG0_MODE_TORCH_FLASH:
+ dev_info(&tps6105x->client->dev,
+ "TPS6105x found in FLASH mode\n");
+ break;
+ case TPS6105X_REG0_MODE_VOLTAGE:
+ dev_info(&tps6105x->client->dev,
+ "TPS6105x found in VOLTAGE mode\n");
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * MFD cells - one cell whose function (and name) depends on the selected
+ * operation mode, plus a GPIO cell that is always present.
+ */
+static struct mfd_cell tps6105x_cells[] = {
+ {
+ /* name will be runtime assigned */
+ .id = -1,
+ },
+ {
+ .name = "tps6105x-gpio",
+ .id = -1,
+ },
+};
+
+static int __devinit tps6105x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tps6105x *tps6105x;
+ struct tps6105x_platform_data *pdata;
+ int ret;
+ int i;
+
+ tps6105x = kmalloc(sizeof(*tps6105x), GFP_KERNEL);
+ if (!tps6105x)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, tps6105x);
+ tps6105x->client = client;
+ pdata = client->dev.platform_data;
+ tps6105x->pdata = pdata;
+ mutex_init(&tps6105x->lock);
+
+ ret = tps6105x_startup(tps6105x);
+ if (ret) {
+ dev_err(&client->dev, "chip initialization failed\n");
+ goto fail;
+ }
+
+ /* Remove warning texts when you implement new cell drivers */
+ switch (pdata->mode) {
+ case TPS6105X_MODE_SHUTDOWN:
+ dev_info(&client->dev,
+ "present, not used for anything, only GPIO\n");
+ break;
+ case TPS6105X_MODE_TORCH:
+ tps6105x_cells[0].name = "tps6105x-leds";
+ dev_warn(&client->dev,
+ "torch mode is unsupported\n");
+ break;
+ case TPS6105X_MODE_TORCH_FLASH:
+ tps6105x_cells[0].name = "tps6105x-flash";
+ dev_warn(&client->dev,
+ "flash mode is unsupported\n");
+ break;
+ case TPS6105X_MODE_VOLTAGE:
+ tps6105x_cells[0].name = "tps6105x-regulator";
+ break;
+ default:
+ break;
+ }
+
+ /* Set up and register the platform devices. */
+ for (i = 0; i < ARRAY_SIZE(tps6105x_cells); i++) {
+ /* One state holder for all drivers, this is simple */
+ tps6105x_cells[i].mfd_data = tps6105x;
+ }
+
+ ret = mfd_add_devices(&client->dev, 0, tps6105x_cells,
+ ARRAY_SIZE(tps6105x_cells), NULL, 0);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ kfree(tps6105x);
+ return ret;
+}
+
+static int __devexit tps6105x_remove(struct i2c_client *client)
+{
+ struct tps6105x *tps6105x = i2c_get_clientdata(client);
+
+ mfd_remove_devices(&client->dev);
+
+ /* Put chip in shutdown mode */
+ tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0,
+ TPS6105X_REG0_MODE_MASK,
+ TPS6105X_MODE_SHUTDOWN << TPS6105X_REG0_MODE_SHIFT);
+
+ kfree(tps6105x);
+ return 0;
+}
+
+static const struct i2c_device_id tps6105x_id[] = {
+ { "tps61050", 0 },
+ { "tps61052", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tps6105x_id);
+
+static struct i2c_driver tps6105x_driver = {
+ .driver = {
+ .name = "tps6105x",
+ },
+ .probe = tps6105x_probe,
+ .remove = __devexit_p(tps6105x_remove),
+ .id_table = tps6105x_id,
+};
+
+static int __init tps6105x_init(void)
+{
+ return i2c_add_driver(&tps6105x_driver);
+}
+subsys_initcall(tps6105x_init);
+
+static void __exit tps6105x_exit(void)
+{
+ i2c_del_driver(&tps6105x_driver);
+}
+module_exit(tps6105x_exit);
+
+MODULE_AUTHOR("Linus Walleij");
+MODULE_DESCRIPTION("TPS6105x White LED Boost Converter Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index e9018d1394e..0aa9186aec1 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -288,12 +288,10 @@ static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
return tps6586x_update(tps6586x->dev, TPS6586X_GPIOSET1, val, mask);
}
-static void tps6586x_gpio_init(struct tps6586x *tps6586x, int gpio_base)
+static int tps6586x_gpio_init(struct tps6586x *tps6586x, int gpio_base)
{
- int ret;
-
if (!gpio_base)
- return;
+ return 0;
tps6586x->gpio.owner = THIS_MODULE;
tps6586x->gpio.label = tps6586x->client->name;
@@ -307,9 +305,7 @@ static void tps6586x_gpio_init(struct tps6586x *tps6586x, int gpio_base)
tps6586x->gpio.set = tps6586x_gpio_set;
tps6586x->gpio.get = tps6586x_gpio_get;
- ret = gpiochip_add(&tps6586x->gpio);
- if (ret)
- dev_warn(tps6586x->dev, "GPIO registration failed: %d\n", ret);
+ return gpiochip_add(&tps6586x->gpio);
}
static int __remove_subdev(struct device *dev, void *unused)
@@ -517,17 +513,28 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
}
}
+ ret = tps6586x_gpio_init(tps6586x, pdata->gpio_base);
+ if (ret) {
+ dev_err(&client->dev, "GPIO registration failed: %d\n", ret);
+ goto err_gpio_init;
+ }
+
ret = tps6586x_add_subdevs(tps6586x, pdata);
if (ret) {
dev_err(&client->dev, "add devices failed: %d\n", ret);
goto err_add_devs;
}
- tps6586x_gpio_init(tps6586x, pdata->gpio_base);
-
return 0;
err_add_devs:
+ if (pdata->gpio_base) {
+ ret = gpiochip_remove(&tps6586x->gpio);
+ if (ret)
+ dev_err(&client->dev, "Can't remove gpio chip: %d\n",
+ ret);
+ }
+err_gpio_init:
if (client->irq)
free_irq(client->irq, tps6586x);
err_irq_init:
@@ -587,4 +594,3 @@ module_exit(tps6586x_exit);
MODULE_DESCRIPTION("TPS6586X core driver");
MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
MODULE_LICENSE("GPL");
-
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index a35fa7dcbf5..960b5bed7f5 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -721,13 +721,13 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
}
- if (twl_has_watchdog()) {
+ if (twl_has_watchdog() && twl_class_is_4030()) {
child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
- if (twl_has_pwrbutton()) {
+ if (twl_has_pwrbutton() && twl_class_is_4030()) {
child = add_child(1, "twl4030_pwrbutton",
NULL, 0, true, pdata->irq_base + 8 + 0, 0);
if (IS_ERR(child))
@@ -864,6 +864,10 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
child = add_regulator(TWL6030_REG_VAUX3_6030, pdata->vaux3);
if (IS_ERR(child))
return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_CLK32KG, pdata->clk32kg);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
}
if (twl_has_bci() && pdata->bci &&
diff --git a/drivers/mfd/twl4030-codec.c b/drivers/mfd/twl4030-codec.c
index 9a4b196d6de..c02fded316c 100644
--- a/drivers/mfd/twl4030-codec.c
+++ b/drivers/mfd/twl4030-codec.c
@@ -208,15 +208,13 @@ static int __devinit twl4030_codec_probe(struct platform_device *pdev)
if (pdata->audio) {
cell = &codec->cells[childs];
cell->name = "twl4030-codec";
- cell->platform_data = pdata->audio;
- cell->data_size = sizeof(*pdata->audio);
+ cell->mfd_data = pdata->audio;
childs++;
}
if (pdata->vibra) {
cell = &codec->cells[childs];
cell->name = "twl4030-vibra";
- cell->platform_data = pdata->vibra;
- cell->data_size = sizeof(*pdata->vibra);
+ cell->mfd_data = pdata->vibra;
childs++;
}
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
new file mode 100644
index 00000000000..3941ddcf15f
--- /dev/null
+++ b/drivers/mfd/twl4030-madc.c
@@ -0,0 +1,802 @@
+/*
+ *
+ * TWL4030 MADC module driver-This driver monitors the real time
+ * conversion of analog signals like battery temperature,
+ * battery type, battery level etc.
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * J Keerthy <j-keerthy@ti.com>
+ *
+ * Based on twl4030-madc.c
+ * Copyright (C) 2008 Nokia Corporation
+ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
+ *
+ * Amit Kucheria <amit.kucheria@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/i2c/twl.h>
+#include <linux/i2c/twl4030-madc.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+
+/*
+ * struct twl4030_madc_data - a container for madc info
+ * @dev - pointer to device structure for madc
+ * @lock - mutex protecting this data structure
+ * @requests - Array of request struct corresponding to SW1, SW2 and RT
+ * @imr - Interrupt mask register of MADC
+ * @isr - Interrupt status register of MADC
+ */
+struct twl4030_madc_data {
+ struct device *dev;
+ struct mutex lock; /* mutex protecting this data structure */
+ struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS];
+ int imr;
+ int isr;
+};
+
+static struct twl4030_madc_data *twl4030_madc;
+
+struct twl4030_prescale_divider_ratios {
+ s16 numerator;
+ s16 denominator;
+};
+
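+/*
+ * Input prescaler ratio (numerator/denominator) for each MADC channel,
+ * used to scale raw conversion results back to the input voltage.
+ */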
+static const struct twl4030_prescale_divider_ratios
+twl4030_divider_ratios[16] = {
+ {1, 1}, /* CHANNEL 0 No Prescaler */
+ {1, 1}, /* CHANNEL 1 No Prescaler */
+ {6, 10}, /* CHANNEL 2 */
+ {6, 10}, /* CHANNEL 3 */
+ {6, 10}, /* CHANNEL 4 */
+ {6, 10}, /* CHANNEL 5 */
+ {6, 10}, /* CHANNEL 6 */
+ {6, 10}, /* CHANNEL 7 */
+ {3, 14}, /* CHANNEL 8 */
+ {1, 3}, /* CHANNEL 9 */
+ {1, 1}, /* CHANNEL 10 No Prescaler */
+ {15, 100}, /* CHANNEL 11 */
+ {1, 4}, /* CHANNEL 12 */
+ {1, 1}, /* CHANNEL 13 Reserved channels */
+ {1, 1}, /* CHANNEL 14 Reserved channels */
+ {5, 11}, /* CHANNEL 15 */
+};
+
+
+/*
+ * Conversion table from -3 to 55 degrees Celsius
+ */
+static int therm_tbl[] = {
+30800, 29500, 28300, 27100,
+26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700, 17900,
+17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100, 12600, 12100,
+11600, 11200, 10800, 10400, 10000, 9630, 9280, 8950, 8620, 8310,
+8020, 7730, 7460, 7200, 6950, 6710, 6470, 6250, 6040, 5830,
+5640, 5450, 5260, 5090, 4920, 4760, 4600, 4450, 4310, 4170,
+4040, 3910, 3790, 3670, 3550
+};
+
+/*
+ * Structure containing the registers used by the different conversion
+ * methods supported by the MADC.
+ * RT (real time) conversions are hardware requests initiated by an external
+ * host processor for real-time signal conversions.
+ * External host processors can also request non-RT conversions: the SW1 and
+ * SW2 software conversions, also called asynchronous or GPC requests.
+ */
+static
+const struct twl4030_madc_conversion_method twl4030_conversion_methods[] = {
+ [TWL4030_MADC_RT] = {
+ .sel = TWL4030_MADC_RTSELECT_LSB,
+ .avg = TWL4030_MADC_RTAVERAGE_LSB,
+ .rbase = TWL4030_MADC_RTCH0_LSB,
+ },
+ [TWL4030_MADC_SW1] = {
+ .sel = TWL4030_MADC_SW1SELECT_LSB,
+ .avg = TWL4030_MADC_SW1AVERAGE_LSB,
+ .rbase = TWL4030_MADC_GPCH0_LSB,
+ .ctrl = TWL4030_MADC_CTRL_SW1,
+ },
+ [TWL4030_MADC_SW2] = {
+ .sel = TWL4030_MADC_SW2SELECT_LSB,
+ .avg = TWL4030_MADC_SW2AVERAGE_LSB,
+ .rbase = TWL4030_MADC_GPCH0_LSB,
+ .ctrl = TWL4030_MADC_CTRL_SW2,
+ },
+};
+
+/*
+ * Function to read a particular channel value.
+ * @madc - pointer to struct twl4030_madc_data
+ * @reg - lsb of ADC Channel
+ * Returns a negative error on i2c failure, else the 10-bit conversion result.
+ */
+static int twl4030_madc_channel_raw_read(struct twl4030_madc_data *madc, u8 reg)
+{
+ u8 msb, lsb;
+ int ret;
+ /*
+ * For each ADC channel there is an MSB and LSB register pair. The MSB
+ * address is always the LSB address + 1. The reg parameter is the address
+ * of the LSB register.
+ */
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &msb, reg + 1);
+ if (ret) {
+ dev_err(madc->dev, "unable to read MSB register 0x%X\n",
+ reg + 1);
+ return ret;
+ }
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &lsb, reg);
+ if (ret) {
+ dev_err(madc->dev, "unable to read LSB register 0x%X\n", reg);
+ return ret;
+ }
+
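+ /* The 10-bit conversion result sits in bits [15:6] of the MSB:LSB pair */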
+ return (int)(((msb << 8) | lsb) >> 6);
+}
+
+/*
+ * Return the battery temperature,
+ * or < 0 on failure.
+ */
+static int twl4030battery_temperature(int raw_volt)
+{
+ u8 val;
+ int temp, curr, volt, res, ret;
+
+ volt = (raw_volt * TEMP_STEP_SIZE) / TEMP_PSR_R;
+ /* Read and calculate the supply current in microamperes */
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val,
+ REG_BCICTL2);
+ if (ret < 0)
+ return ret;
+ curr = ((val & TWL4030_BCI_ITHEN) + 1) * 10;
+ /* Read and calculate the thermistor resistance in ohms */
+ res = volt * 1000 / curr;
+ /* calculating temperature */
+ for (temp = 58; temp >= 0; temp--) {
+ int actual = therm_tbl[temp];
+
+ if ((actual - res) >= 0)
+ break;
+ }
+
+ return temp + 1;
+}
+
+static int twl4030battery_current(int raw_volt)
+{
+ int ret;
+ u8 val;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val,
+ TWL4030_BCI_BCICTL1);
+ if (ret)
+ return ret;
+ if (val & TWL4030_BCI_CGAIN) /* slope of 0.44 mV/mA */
+ return (raw_volt * CURR_STEP_SIZE) / CURR_PSR_R1;
+ else /* slope of 0.88 mV/mA */
+ return (raw_volt * CURR_STEP_SIZE) / CURR_PSR_R2;
+}
+/*
+ * Function to read channel values
+ * @madc - pointer to twl4030_madc_data struct
+ * @reg_base - base address of the first channel
+ * @channels - 16-bit bitmap; if a bit is set, that channel's value is read
+ * @buf - the channel values are stored here; if a read fails, the error
+ * value is stored instead
+ * Returns the number of successfully read channels.
+ */
+static int twl4030_madc_read_channels(struct twl4030_madc_data *madc,
+ u8 reg_base, unsigned
+ long channels, int *buf)
+{
+ int count = 0, count_req = 0, i;
+ u8 reg;
+
+ for_each_set_bit(i, &channels, TWL4030_MADC_MAX_CHANNELS) {
+ reg = reg_base + 2 * i;
+ buf[i] = twl4030_madc_channel_raw_read(madc, reg);
+ if (buf[i] < 0) {
+ dev_err(madc->dev,
+ "Unable to read register 0x%X\n", reg);
+ count_req++;
+ continue;
+ }
+ switch (i) {
+ case 10:
+ buf[i] = twl4030battery_current(buf[i]);
+ if (buf[i] < 0) {
+ dev_err(madc->dev, "err reading current\n");
+ count_req++;
+ } else {
+ count++;
+ buf[i] = buf[i] - 750;
+ }
+ break;
+ case 1:
+ buf[i] = twl4030battery_temperature(buf[i]);
+ if (buf[i] < 0) {
+ dev_err(madc->dev, "err reading temperature\n");
+ count_req++;
+ } else {
+ buf[i] -= 3;
+ count++;
+ }
+ break;
+ default:
+ count++;
+ /* Analog Input (V) = conv_result * step_size / R
+ * conv_result = decimal value of 10-bit conversion
+ * result
+ * step size = 1.5 / (2 ^ 10 -1)
+ * R = Prescaler ratio for input channels.
+ * Result given in mV hence multiplied by 1000.
+ */
+ buf[i] = (buf[i] * 3 * 1000 *
+ twl4030_divider_ratios[i].denominator)
+ / (2 * 1023 *
+ twl4030_divider_ratios[i].numerator);
+ }
+ }
+ if (count_req)
+ dev_err(madc->dev, "%d channel conversion failed\n", count_req);
+
+ return count;
+}
+
+/*
+ * Enables irq.
+ * @madc - pointer to twl4030_madc_data struct
+ * @id - irq number to be enabled
+ * can take one of TWL4030_MADC_RT, TWL4030_MADC_SW1, TWL4030_MADC_SW2
+ * corresponding to RT, SW1, SW2 conversion requests.
+ * Returns an error if the i2c read or write fails, else 0.
+ */
+static int twl4030_madc_enable_irq(struct twl4030_madc_data *madc, u8 id)
+{
+ u8 val;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read imr register 0x%X\n",
+ madc->imr);
+ return ret;
+ }
+ val &= ~(1 << id);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to write imr register 0x%X\n", madc->imr);
+ return ret;
+
+ }
+
+ return 0;
+}
+
+/*
+ * Disables irq.
+ * @madc - pointer to twl4030_madc_data struct
+ * @id - irq number to be disabled
+ * can take one of TWL4030_MADC_RT, TWL4030_MADC_SW1, TWL4030_MADC_SW2
+ * corresponding to RT, SW1, SW2 conversion requests.
+ * Returns error if i2c read/write fails.
+ */
+static int twl4030_madc_disable_irq(struct twl4030_madc_data *madc, u8 id)
+{
+ u8 val;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read imr register 0x%X\n",
+ madc->imr);
+ return ret;
+ }
+ val |= (1 << id);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to write imr register 0x%X\n", madc->imr);
+ return ret;
+ }
+
+ return 0;
+}
+
+static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
+{
+ struct twl4030_madc_data *madc = _madc;
+ const struct twl4030_madc_conversion_method *method;
+ u8 isr_val, imr_val;
+ int i, len, ret;
+ struct twl4030_madc_request *r;
+
+ mutex_lock(&madc->lock);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &isr_val, madc->isr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read isr register 0x%X\n",
+ madc->isr);
+ goto err_i2c;
+ }
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &imr_val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read imr register 0x%X\n",
+ madc->imr);
+ goto err_i2c;
+ }
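+ /* Only service interrupt sources that are not currently masked */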
+ isr_val &= ~imr_val;
+ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
+ if (!(isr_val & (1 << i)))
+ continue;
+ ret = twl4030_madc_disable_irq(madc, i);
+ if (ret < 0)
+ dev_dbg(madc->dev, "Disable interrupt failed %d\n", i);
+ madc->requests[i].result_pending = 1;
+ }
+ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
+ r = &madc->requests[i];
+ /* No pending results for this method, move to next one */
+ if (!r->result_pending)
+ continue;
+ method = &twl4030_conversion_methods[r->method];
+ /* Read results */
+ len = twl4030_madc_read_channels(madc, method->rbase,
+ r->channels, r->rbuf);
+ /* Return results to caller */
+ if (r->func_cb != NULL) {
+ r->func_cb(len, r->channels, r->rbuf);
+ r->func_cb = NULL;
+ }
+ /* Free request */
+ r->result_pending = 0;
+ r->active = 0;
+ }
+ mutex_unlock(&madc->lock);
+
+ return IRQ_HANDLED;
+
+err_i2c:
+ /*
+ * In case of an error, check which requests are still active
+ * and service them.
+ */
+ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
+ r = &madc->requests[i];
+ if (r->active == 0)
+ continue;
+ method = &twl4030_conversion_methods[r->method];
+ /* Read results */
+ len = twl4030_madc_read_channels(madc, method->rbase,
+ r->channels, r->rbuf);
+ /* Return results to caller */
+ if (r->func_cb != NULL) {
+ r->func_cb(len, r->channels, r->rbuf);
+ r->func_cb = NULL;
+ }
+ /* Free request */
+ r->result_pending = 0;
+ r->active = 0;
+ }
+ mutex_unlock(&madc->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int twl4030_madc_set_irq(struct twl4030_madc_data *madc,
+ struct twl4030_madc_request *req)
+{
+ struct twl4030_madc_request *p;
+ int ret;
+
+ p = &madc->requests[req->method];
+ memcpy(p, req, sizeof(*req));
+ ret = twl4030_madc_enable_irq(madc, req->method);
+ if (ret < 0) {
+ dev_err(madc->dev, "enable irq failed!!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Function which enables the madc conversion
+ * by writing to the control register.
+ * @madc - pointer to twl4030_madc_data struct
+ * @conv_method - can be TWL4030_MADC_RT, TWL4030_MADC_SW2, TWL4030_MADC_SW1
+ * corresponding to RT SW1 or SW2 conversion methods.
+ * Returns 0 on success, else a negative error value
+ */
+static int twl4030_madc_start_conversion(struct twl4030_madc_data *madc,
+ int conv_method)
+{
+ const struct twl4030_madc_conversion_method *method;
+ int ret = 0;
+ method = &twl4030_conversion_methods[conv_method];
+ switch (conv_method) {
+ case TWL4030_MADC_SW1:
+ case TWL4030_MADC_SW2:
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
+ TWL4030_MADC_SW_START, method->ctrl);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to write ctrl register 0x%X\n",
+ method->ctrl);
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Function that waits for conversion to be ready
+ * @madc - pointer to twl4030_madc_data struct
+ * @timeout_ms - timeout value in milliseconds
+ * @status_reg - ctrl register
+ * Returns 0 on success, else a negative error value
+ */
+static int twl4030_madc_wait_conversion_ready(struct twl4030_madc_data *madc,
+ unsigned int timeout_ms,
+ u8 status_reg)
+{
+ unsigned long timeout;
+ int ret;
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
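+ /* Poll until the MADC reports not-busy with EOC set, or the timeout expires */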
+ do {
+ u8 reg;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &reg, status_reg);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to read status register 0x%X\n",
+ status_reg);
+ return ret;
+ }
+ if (!(reg & TWL4030_MADC_BUSY) && (reg & TWL4030_MADC_EOC_SW))
+ return 0;
+ usleep_range(500, 2000);
+ } while (!time_after(jiffies, timeout));
+ dev_err(madc->dev, "conversion timeout!\n");
+
+ return -EAGAIN;
+}
+
+/*
+ * An exported function which can be called from other kernel drivers.
+ * @req twl4030_madc_request structure
+ * req->rbuf will be filled with read values of channels based on the
+ * channel index. If a particular channel reading fails there will
+ * be a negative error value in the corresponding array element.
+ * Returns 0 on success, else a negative error value
+ */
+int twl4030_madc_conversion(struct twl4030_madc_request *req)
+{
+ const struct twl4030_madc_conversion_method *method;
+ u8 ch_msb, ch_lsb;
+ int ret;
+
+ if (!req)
+ return -EINVAL;
+ mutex_lock(&twl4030_madc->lock);
+ if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
+ ret = -EINVAL;
+ goto out;
+ }
+ /* Do we have a conversion request ongoing */
+ if (twl4030_madc->requests[req->method].active) {
+ ret = -EBUSY;
+ goto out;
+ }
+ ch_msb = (req->channels >> 8) & 0xff;
+ ch_lsb = req->channels & 0xff;
+ method = &twl4030_conversion_methods[req->method];
+ /* Select channels to be converted */
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_msb, method->sel + 1);
+ if (ret) {
+ dev_err(twl4030_madc->dev,
+ "unable to write sel register 0x%X\n", method->sel + 1);
+ goto out;
+ }
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->sel);
+ if (ret) {
+ dev_err(twl4030_madc->dev,
+ "unable to write sel register 0x%X\n", method->sel);
+ goto out;
+ }
+ /* Select averaging for all channels if do_avg is set */
+ if (req->do_avg) {
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
+ ch_msb, method->avg + 1);
+ if (ret) {
+ dev_err(twl4030_madc->dev,
+ "unable to write avg register 0x%X\n",
+ method->avg + 1);
+ goto out;
+ }
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
+ ch_lsb, method->avg);
+ if (ret) {
+ dev_err(twl4030_madc->dev,
+ "unable to write avg register 0x%X\n",
+ method->avg);
+ goto out;
+ }
+ }
+ if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) {
+ ret = twl4030_madc_set_irq(twl4030_madc, req);
+ if (ret < 0)
+ goto out;
+ ret = twl4030_madc_start_conversion(twl4030_madc, req->method);
+ if (ret < 0)
+ goto out;
+ twl4030_madc->requests[req->method].active = 1;
+ ret = 0;
+ goto out;
+ }
+ /* With RT method we should not be here anymore */
+ if (req->method == TWL4030_MADC_RT) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = twl4030_madc_start_conversion(twl4030_madc, req->method);
+ if (ret < 0)
+ goto out;
+ twl4030_madc->requests[req->method].active = 1;
+ /* Wait until conversion is ready (ctrl register returns EOC) */
+ ret = twl4030_madc_wait_conversion_ready(twl4030_madc, 5, method->ctrl);
+ if (ret) {
+ twl4030_madc->requests[req->method].active = 0;
+ goto out;
+ }
+ ret = twl4030_madc_read_channels(twl4030_madc, method->rbase,
+ req->channels, req->rbuf);
+ twl4030_madc->requests[req->method].active = 0;
+
+out:
+ mutex_unlock(&twl4030_madc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(twl4030_madc_conversion);
+
+/*
+ * Return channel value
+ * Or < 0 on failure.
+ */
+int twl4030_get_madc_conversion(int channel_no)
+{
+ struct twl4030_madc_request req;
+ int temp = 0;
+ int ret;
+
+ req.channels = (1 << channel_no);
+ req.method = TWL4030_MADC_SW2;
+ req.active = 0;
+ req.func_cb = NULL;
+ ret = twl4030_madc_conversion(&req);
+ if (ret < 0)
+ return ret;
+ if (req.rbuf[channel_no] > 0)
+ temp = req.rbuf[channel_no];
+
+ return temp;
+}
+EXPORT_SYMBOL_GPL(twl4030_get_madc_conversion);
+
+/*
+ * Function to enable or disable bias current for
+ * main battery type reading or temperature sensing
+ * @madc - pointer to twl4030_madc_data struct
+ * @chan - can be one of the two values
+ * TWL4030_BCI_ITHEN - Enables bias current for main battery type reading
+ * TWL4030_BCI_TYPEN - Enables bias current for main battery temperature
+ * sensing
+ * @on - enable or disable chan.
+ */
+static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
+ int chan, int on)
+{
+ int ret;
+ u8 regval;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ &regval, TWL4030_BCI_BCICTL1);
+ if (ret) {
+ dev_err(madc->dev, "unable to read BCICTL1 reg 0x%X",
+ TWL4030_BCI_BCICTL1);
+ return ret;
+ }
+ if (on)
+ regval |= chan ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN;
+ else
+ regval &= chan ? ~TWL4030_BCI_ITHEN : ~TWL4030_BCI_TYPEN;
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
+ regval, TWL4030_BCI_BCICTL1);
+ if (ret) {
+ dev_err(madc->dev, "unable to write BCICTL1 reg 0x%X\n",
+ TWL4030_BCI_BCICTL1);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Function that sets MADC software power on bit to enable MADC
+ * @madc - pointer to twl4030_madc_data struct
+ * @on - enable or disable the MADC software power on bit.
+ * Returns an error if the i2c read/write fails, else 0.
+ */
+static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
+{
+ u8 regval;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC,
+ &regval, TWL4030_MADC_CTRL1);
+ if (ret) {
+ dev_err(madc->dev, "unable to read madc ctrl1 reg 0x%X\n",
+ TWL4030_MADC_CTRL1);
+ return ret;
+ }
+ if (on)
+ regval |= TWL4030_MADC_MADCON;
+ else
+ regval &= ~TWL4030_MADC_MADCON;
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, regval, TWL4030_MADC_CTRL1);
+ if (ret) {
+ dev_err(madc->dev, "unable to write madc ctrl1 reg 0x%X\n",
+ TWL4030_MADC_CTRL1);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize MADC and request for threaded irq
+ */
+static int __devinit twl4030_madc_probe(struct platform_device *pdev)
+{
+ struct twl4030_madc_data *madc;
+ struct twl4030_madc_platform_data *pdata = pdev->dev.platform_data;
+ int ret;
+ u8 regval;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform_data not available\n");
+ return -EINVAL;
+ }
+ madc = kzalloc(sizeof(*madc), GFP_KERNEL);
+ if (!madc)
+ return -ENOMEM;
+ madc->dev = &pdev->dev;
+
+ /*
+ * Phoenix provides 2 interrupt lines. The first one is connected to
+ * the OMAP. The other one can be connected to the other processor such
+ * as modem. Hence two separate ISR and IMR registers.
+ */
+ madc->imr = (pdata->irq_line == 1) ?
+ TWL4030_MADC_IMR1 : TWL4030_MADC_IMR2;
+ madc->isr = (pdata->irq_line == 1) ?
+ TWL4030_MADC_ISR1 : TWL4030_MADC_ISR2;
+ ret = twl4030_madc_set_power(madc, 1);
+ if (ret < 0)
+ goto err_power;
+ ret = twl4030_madc_set_current_generator(madc, 0, 1);
+ if (ret < 0)
+ goto err_current_generator;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ &regval, TWL4030_BCI_BCICTL1);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to read reg BCI CTL1 0x%X\n",
+ TWL4030_BCI_BCICTL1);
+ goto err_i2c;
+ }
+ regval |= TWL4030_BCI_MESBAT;
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
+ regval, TWL4030_BCI_BCICTL1);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to write reg BCI Ctl1 0x%X\n",
+ TWL4030_BCI_BCICTL1);
+ goto err_i2c;
+ }
+ platform_set_drvdata(pdev, madc);
+ mutex_init(&madc->lock);
+ ret = request_threaded_irq(platform_get_irq(pdev, 0), NULL,
+ twl4030_madc_threaded_irq_handler,
+ IRQF_TRIGGER_RISING, "twl4030_madc", madc);
+ if (ret) {
+ dev_dbg(&pdev->dev, "could not request irq\n");
+ goto err_irq;
+ }
+ twl4030_madc = madc;
+ return 0;
+err_irq:
+ platform_set_drvdata(pdev, NULL);
+err_i2c:
+ twl4030_madc_set_current_generator(madc, 0, 0);
+err_current_generator:
+ twl4030_madc_set_power(madc, 0);
+err_power:
+ kfree(madc);
+
+ return ret;
+}
+
+static int __devexit twl4030_madc_remove(struct platform_device *pdev)
+{
+ struct twl4030_madc_data *madc = platform_get_drvdata(pdev);
+
+ free_irq(platform_get_irq(pdev, 0), madc);
+ platform_set_drvdata(pdev, NULL);
+ twl4030_madc_set_current_generator(madc, 0, 0);
+ twl4030_madc_set_power(madc, 0);
+ kfree(madc);
+
+ return 0;
+}
+
+static struct platform_driver twl4030_madc_driver = {
+ .probe = twl4030_madc_probe,
+ .remove = __devexit_p(twl4030_madc_remove),
+ .driver = {
+ .name = "twl4030_madc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init twl4030_madc_init(void)
+{
+ return platform_driver_register(&twl4030_madc_driver);
+}
+
+module_init(twl4030_madc_init);
+
+static void __exit twl4030_madc_exit(void)
+{
+ platform_driver_unregister(&twl4030_madc_driver);
+}
+
+module_exit(twl4030_madc_exit);
+
+MODULE_DESCRIPTION("TWL4030 ADC driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("J Keerthy");
+MODULE_ALIAS("platform:twl4030_madc");
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 92b85e28a15..38ffbd50a0d 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -60,6 +60,7 @@ static inline void ucb1x00_ts_evt_add(struct ucb1x00_ts *ts, u16 pressure, u16 x
input_report_abs(idev, ABS_X, x);
input_report_abs(idev, ABS_Y, y);
input_report_abs(idev, ABS_PRESSURE, pressure);
+ input_report_key(idev, BTN_TOUCH, 1);
input_sync(idev);
}
@@ -68,6 +69,7 @@ static inline void ucb1x00_ts_event_release(struct ucb1x00_ts *ts)
struct input_dev *idev = ts->idev;
input_report_abs(idev, ABS_PRESSURE, 0);
+ input_report_key(idev, BTN_TOUCH, 0);
input_sync(idev);
}
@@ -384,7 +386,8 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
idev->open = ucb1x00_ts_open;
idev->close = ucb1x00_ts_close;
- __set_bit(EV_ABS, idev->evbit);
+ idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
+ idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
input_set_drvdata(idev, ts);
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index 348052aa5db..d698703dbd4 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -122,6 +122,7 @@ static struct pci_device_id vx855_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
{ 0, }
};
+MODULE_DEVICE_TABLE(pci, vx855_pci_tbl);
static struct pci_driver vx855_pci_driver = {
.name = "vx855",
diff --git a/drivers/mfd/wl1273-core.c b/drivers/mfd/wl1273-core.c
index d2ecc243573..529d65ba535 100644
--- a/drivers/mfd/wl1273-core.c
+++ b/drivers/mfd/wl1273-core.c
@@ -38,7 +38,6 @@ static int wl1273_core_remove(struct i2c_client *client)
dev_dbg(&client->dev, "%s\n", __func__);
mfd_remove_devices(&client->dev);
- i2c_set_clientdata(client, NULL);
kfree(core);
return 0;
@@ -79,8 +78,7 @@ static int __devinit wl1273_core_probe(struct i2c_client *client,
cell = &core->cells[children];
cell->name = "wl1273_fm_radio";
- cell->platform_data = &core;
- cell->data_size = sizeof(core);
+ cell->mfd_data = &core;
children++;
if (pdata->children & WL1273_CODEC_CHILD) {
@@ -88,8 +86,7 @@ static int __devinit wl1273_core_probe(struct i2c_client *client,
dev_dbg(&client->dev, "%s: Have codec.\n", __func__);
cell->name = "wl1273-codec";
- cell->platform_data = &core;
- cell->data_size = sizeof(core);
+ cell->mfd_data = &core;
children++;
}
@@ -104,7 +101,6 @@ static int __devinit wl1273_core_probe(struct i2c_client *client,
return 0;
err:
- i2c_set_clientdata(client, NULL);
pdata->free_resources();
kfree(core);
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index 3853fa8e7cc..a06cbc73971 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -51,17 +51,25 @@ static int wm831x_i2c_write_device(struct wm831x *wm831x, unsigned short reg,
int bytes, void *src)
{
struct i2c_client *i2c = wm831x->control_data;
- unsigned char msg[bytes + 2];
+ struct i2c_msg xfer[2];
int ret;
reg = cpu_to_be16(reg);
- memcpy(&msg[0], &reg, 2);
- memcpy(&msg[2], src, bytes);
- ret = i2c_master_send(i2c, msg, bytes + 2);
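+ /*
+ * Send the register address and payload as one transaction; the second
+ * message uses I2C_M_NOSTART so the data follows the address without a
+ * repeated start, avoiding the variable-length stack buffer.
+ */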
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 2;
+ xfer[0].buf = (char *)&reg;
+
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_NOSTART;
+ xfer[1].len = bytes;
+ xfer[1].buf = (char *)src;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
if (ret < 0)
return ret;
- if (ret < bytes + 2)
+ if (ret != 2)
return -EIO;
return 0;
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index f7192d438aa..a5cd17e18d0 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -26,15 +26,6 @@
#include <linux/delay.h>
-/*
- * Since generic IRQs don't currently support interrupt controllers on
- * interrupt driven buses we don't use genirq but instead provide an
- * interface that looks very much like the standard ones. This leads
- * to some bodges, including storing interrupt handler information in
- * the static irq_data table we use to look up the data for individual
- * interrupts, but hopefully won't last too long.
- */
-
struct wm831x_irq_data {
int primary;
int reg;
@@ -361,6 +352,10 @@ static void wm831x_irq_sync_unlock(struct irq_data *data)
/* If there's been a change in the mask write it back
* to the hardware. */
if (wm831x->irq_masks_cur[i] != wm831x->irq_masks_cache[i]) {
+ dev_dbg(wm831x->dev, "IRQ mask sync: %x = %x\n",
+ WM831X_INTERRUPT_STATUS_1_MASK + i,
+ wm831x->irq_masks_cur[i]);
+
wm831x->irq_masks_cache[i] = wm831x->irq_masks_cur[i];
wm831x_reg_write(wm831x,
WM831X_INTERRUPT_STATUS_1_MASK + i,
@@ -371,7 +366,7 @@ static void wm831x_irq_sync_unlock(struct irq_data *data)
mutex_unlock(&wm831x->irq_lock);
}
-static void wm831x_irq_unmask(struct irq_data *data)
+static void wm831x_irq_enable(struct irq_data *data)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
@@ -380,7 +375,7 @@ static void wm831x_irq_unmask(struct irq_data *data)
wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
-static void wm831x_irq_mask(struct irq_data *data)
+static void wm831x_irq_disable(struct irq_data *data)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
@@ -426,8 +421,8 @@ static struct irq_chip wm831x_irq_chip = {
.name = "wm831x",
.irq_bus_lock = wm831x_irq_lock,
.irq_bus_sync_unlock = wm831x_irq_sync_unlock,
- .irq_mask = wm831x_irq_mask,
- .irq_unmask = wm831x_irq_unmask,
+ .irq_disable = wm831x_irq_disable,
+ .irq_enable = wm831x_irq_enable,
.irq_set_type = wm831x_irq_set_type,
};
@@ -449,6 +444,18 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
goto out;
}
+ /* The touch interrupts are visible in the primary register as
+ * an optimisation; open code this to avoid complicating the
+ * main handling loop and so we can also skip iterating the
+ * descriptors.
+ */
+ if (primary & WM831X_TCHPD_INT)
+ handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHPD);
+ if (primary & WM831X_TCHDATA_INT)
+ handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHDATA);
+ if (primary & (WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT))
+ goto out;
+
for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) {
int offset = wm831x_irqs[i].reg - 1;
@@ -481,6 +488,9 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
}
out:
+ /* Touchscreen interrupts are handled specially in the driver */
+ status_regs[0] &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
+
for (i = 0; i < ARRAY_SIZE(status_regs); i++) {
if (status_regs[i])
wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1 + i,
@@ -517,6 +527,14 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
return 0;
}
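+ /* Configure the /IRQ pin as CMOS push-pull or open drain per platform data */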
+ if (pdata->irq_cmos)
+ i = 0;
+ else
+ i = WM831X_IRQ_OD;
+
+ wm831x_set_bits(wm831x, WM831X_IRQ_CONFIG,
+ WM831X_IRQ_OD, i);
+
/* Try to flag /IRQ as a wake source; there are a number of
* unconditional wake sources in the PMIC so this isn't
* conditional but we don't actually care *too* much if it
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index 0a8f772be88..eed8e4f7a5a 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/pm.h>
#include <linux/spi/spi.h>
#include <linux/mfd/wm831x/core.h>
@@ -113,22 +114,27 @@ static int __devexit wm831x_spi_remove(struct spi_device *spi)
return 0;
}
-static int wm831x_spi_suspend(struct spi_device *spi, pm_message_t m)
+static int wm831x_spi_suspend(struct device *dev)
{
- struct wm831x *wm831x = dev_get_drvdata(&spi->dev);
+ struct wm831x *wm831x = dev_get_drvdata(dev);
return wm831x_device_suspend(wm831x);
}
+static const struct dev_pm_ops wm831x_spi_pm = {
+ .freeze = wm831x_spi_suspend,
+ .suspend = wm831x_spi_suspend,
+};
+
static struct spi_driver wm8310_spi_driver = {
.driver = {
.name = "wm8310",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8311_spi_driver = {
@@ -136,10 +142,10 @@ static struct spi_driver wm8311_spi_driver = {
.name = "wm8311",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8312_spi_driver = {
@@ -147,10 +153,10 @@ static struct spi_driver wm8312_spi_driver = {
.name = "wm8312",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8320_spi_driver = {
@@ -158,10 +164,10 @@ static struct spi_driver wm8320_spi_driver = {
.name = "wm8320",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8321_spi_driver = {
@@ -169,10 +175,10 @@ static struct spi_driver wm8321_spi_driver = {
.name = "wm8321",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8325_spi_driver = {
@@ -180,10 +186,10 @@ static struct spi_driver wm8325_spi_driver = {
.name = "wm8325",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static struct spi_driver wm8326_spi_driver = {
@@ -191,10 +197,10 @@ static struct spi_driver wm8326_spi_driver = {
.name = "wm8326",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = &wm831x_spi_pm,
},
.probe = wm831x_spi_probe,
.remove = __devexit_p(wm831x_spi_remove),
- .suspend = wm831x_spi_suspend,
};
static int __init wm831x_spi_init(void)
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 1bfef4846b0..3a6e78cb038 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -245,7 +245,7 @@ static int wm8400_register_codec(struct wm8400 *wm8400)
{
struct mfd_cell cell = {
.name = "wm8400-codec",
- .driver_data = wm8400,
+ .mfd_data = wm8400,
};
return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0);
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index f4016a075fd..e198d40292e 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -40,10 +40,8 @@ static int wm8994_read(struct wm8994 *wm8994, unsigned short reg,
return ret;
for (i = 0; i < bytes / 2; i++) {
- buf[i] = be16_to_cpu(buf[i]);
-
dev_vdbg(wm8994->dev, "Read %04x from R%d(0x%x)\n",
- buf[i], reg + i, reg + i);
+ be16_to_cpu(buf[i]), reg + i, reg + i);
}
return 0;
@@ -69,7 +67,7 @@ int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg)
if (ret < 0)
return ret;
else
- return val;
+ return be16_to_cpu(val);
}
EXPORT_SYMBOL_GPL(wm8994_reg_read);
@@ -79,7 +77,7 @@ EXPORT_SYMBOL_GPL(wm8994_reg_read);
* @wm8994: Device to read from
* @reg: First register
* @count: Number of registers
- * @buf: Buffer to fill.
+ * @buf: Buffer to fill. The data will be returned big endian.
*/
int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
int count, u16 *buf)
@@ -97,9 +95,9 @@ int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
EXPORT_SYMBOL_GPL(wm8994_bulk_read);
static int wm8994_write(struct wm8994 *wm8994, unsigned short reg,
- int bytes, void *src)
+ int bytes, const void *src)
{
- u16 *buf = src;
+ const u16 *buf = src;
int i;
BUG_ON(bytes % 2);
@@ -107,9 +105,7 @@ static int wm8994_write(struct wm8994 *wm8994, unsigned short reg,
for (i = 0; i < bytes / 2; i++) {
dev_vdbg(wm8994->dev, "Write %04x to R%d(0x%x)\n",
- buf[i], reg + i, reg + i);
-
- buf[i] = cpu_to_be16(buf[i]);
+ be16_to_cpu(buf[i]), reg + i, reg + i);
}
return wm8994->write_dev(wm8994, reg, bytes, src);
@@ -127,6 +123,8 @@ int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
{
int ret;
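+ /* The low-level write path now takes data in wire (big-endian) order */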
+ val = cpu_to_be16(val);
+
mutex_lock(&wm8994->io_lock);
ret = wm8994_write(wm8994, reg, 2, &val);
@@ -138,6 +136,29 @@ int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
EXPORT_SYMBOL_GPL(wm8994_reg_write);
/**
+ * wm8994_bulk_write: Write multiple WM8994 registers
+ *
+ * @wm8994: Device to write to
+ * @reg: First register
+ * @count: Number of registers
+ * @buf: Buffer to write from. Data must be big-endian formatted.
+ */
+int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
+ int count, const u16 *buf)
+{
+ int ret;
+
+ mutex_lock(&wm8994->io_lock);
+
+ ret = wm8994_write(wm8994, reg, count * 2, buf);
+
+ mutex_unlock(&wm8994->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wm8994_bulk_write);
+
+/**
* wm8994_set_bits: Set the value of a bitfield in a WM8994 register
*
* @wm8994: Device to write to.
@@ -157,9 +178,13 @@ int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
if (ret < 0)
goto out;
+ r = be16_to_cpu(r);
+
r &= ~mask;
r |= val;
+ r = cpu_to_be16(r);
+
ret = wm8994_write(wm8994, reg, 2, &r);
out:
@@ -271,6 +296,11 @@ static int wm8994_suspend(struct device *dev)
if (ret < 0)
dev_err(dev, "Failed to save LDO registers: %d\n", ret);
+ /* Explicitly put the device into reset, in case the regulators
+ * don't get disabled, in order to ensure a consistent restart.
+ */
+ wm8994_reg_write(wm8994, WM8994_SOFTWARE_RESET, 0x8994);
+
wm8994->suspended = true;
ret = regulator_bulk_disable(wm8994->num_supplies,
@@ -552,25 +582,29 @@ static int wm8994_i2c_read_device(struct wm8994 *wm8994, unsigned short reg,
return 0;
}
-/* Currently we allocate the write buffer on the stack; this is OK for
- * small writes - if we need to do large writes this will need to be
- * revised.
- */
static int wm8994_i2c_write_device(struct wm8994 *wm8994, unsigned short reg,
- int bytes, void *src)
+ int bytes, const void *src)
{
struct i2c_client *i2c = wm8994->control_data;
- unsigned char msg[bytes + 2];
+ struct i2c_msg xfer[2];
int ret;
reg = cpu_to_be16(reg);
- memcpy(&msg[0], &reg, 2);
- memcpy(&msg[2], src, bytes);
- ret = i2c_master_send(i2c, msg, bytes + 2);
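+ /*
+ * Write the register address and data in a single transfer; I2C_M_NOSTART
+ * chains the payload onto the address write, so no bounce buffer is needed.
+ */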
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 2;
+ xfer[0].buf = (char *)&reg;
+
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_NOSTART;
+ xfer[1].len = bytes;
+ xfer[1].buf = (char *)src;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
if (ret < 0)
return ret;
- if (ret < bytes + 2)
+ if (ret != 2)
return -EIO;
return 0;
@@ -612,7 +646,8 @@ static const struct i2c_device_id wm8994_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wm8994_i2c_id);
-UNIVERSAL_DEV_PM_OPS(wm8994_pm_ops, wm8994_suspend, wm8994_resume, NULL);
+static UNIVERSAL_DEV_PM_OPS(wm8994_pm_ops, wm8994_suspend, wm8994_resume,
+ NULL);
static struct i2c_driver wm8994_i2c_driver = {
.driver = {
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 29e8faf9c01..1e3bf4a2ff8 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -182,7 +182,7 @@ static void wm8994_irq_sync_unlock(struct irq_data *data)
mutex_unlock(&wm8994->irq_lock);
}
-static void wm8994_irq_unmask(struct irq_data *data)
+static void wm8994_irq_enable(struct irq_data *data)
{
struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
@@ -191,7 +191,7 @@ static void wm8994_irq_unmask(struct irq_data *data)
wm8994->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
-static void wm8994_irq_mask(struct irq_data *data)
+static void wm8994_irq_disable(struct irq_data *data)
{
struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
@@ -204,8 +204,8 @@ static struct irq_chip wm8994_irq_chip = {
.name = "wm8994",
.irq_bus_lock = wm8994_irq_lock,
.irq_bus_sync_unlock = wm8994_irq_sync_unlock,
- .irq_mask = wm8994_irq_mask,
- .irq_unmask = wm8994_irq_unmask,
+ .irq_disable = wm8994_irq_disable,
+ .irq_enable = wm8994_irq_enable,
};
/* The processing of the primary interrupt occurs in a thread so that
@@ -225,9 +225,11 @@ static irqreturn_t wm8994_irq_thread(int irq, void *data)
return IRQ_NONE;
}
- /* Apply masking */
- for (i = 0; i < WM8994_NUM_IRQ_REGS; i++)
+ /* Convert from big-endian wire format and apply masking */
+ for (i = 0; i < WM8994_NUM_IRQ_REGS; i++) {
+ status[i] = be16_to_cpu(status[i]);
status[i] &= ~wm8994->irq_masks_cur[i];
+ }
/* Report */
for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 2a876c4099c..3b1f783bf92 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -58,12 +58,11 @@ config SDIO_UART
config MMC_TEST
tristate "MMC host test driver"
- default n
help
Development driver that performs a series of reads and writes
to a memory card in order to expose certain well known bugs
in host controllers. The tests are executed by writing to the
- "test" file in sysfs under each card. Note that whatever is
+ "test" file in debugfs under each card. Note that whatever is
on your card will be overwritten by these tests.
This driver is only of interest to those developing or
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index bfc8a8ae55d..61d233a7c11 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -621,6 +621,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
md->disk->private_data = md;
md->disk->queue = md->queue.queue;
md->disk->driverfs_dev = &card->dev;
+ set_disk_ro(md->disk, md->read_only);
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 21adc27f413..5ec8eddfcf6 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -88,6 +88,7 @@ struct mmc_test_area {
* @sectors: amount of sectors to check in one group
* @ts: time values of transfer
* @rate: calculated transfer rate
+ * @iops: I/O operations per second (times 100)
*/
struct mmc_test_transfer_result {
struct list_head link;
@@ -95,6 +96,7 @@ struct mmc_test_transfer_result {
unsigned int sectors;
struct timespec ts;
unsigned int rate;
+ unsigned int iops;
};
/**
@@ -226,9 +228,10 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
if (!busy && mmc_test_busy(&cmd)) {
busy = 1;
- printk(KERN_INFO "%s: Warning: Host did not "
- "wait for busy state to end.\n",
- mmc_hostname(test->card->host));
+ if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+ printk(KERN_INFO "%s: Warning: Host did not "
+ "wait for busy state to end.\n",
+ mmc_hostname(test->card->host));
}
} while (mmc_test_busy(&cmd));
@@ -494,7 +497,7 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
*/
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
unsigned int count, unsigned int sectors, struct timespec ts,
- unsigned int rate)
+ unsigned int rate, unsigned int iops)
{
struct mmc_test_transfer_result *tr;
@@ -509,6 +512,7 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
tr->sectors = sectors;
tr->ts = ts;
tr->rate = rate;
+ tr->iops = iops;
list_add_tail(&tr->link, &test->gr->tr_lst);
}
@@ -519,20 +523,22 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
struct timespec *ts1, struct timespec *ts2)
{
- unsigned int rate, sectors = bytes >> 9;
+ unsigned int rate, iops, sectors = bytes >> 9;
struct timespec ts;
ts = timespec_sub(*ts2, *ts1);
rate = mmc_test_rate(bytes, &ts);
+ iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
- "seconds (%u kB/s, %u KiB/s)\n",
+ "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
mmc_hostname(test->card->host), sectors, sectors >> 1,
(sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
- (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
+ (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
+ iops / 100, iops % 100);
- mmc_test_save_transfer_result(test, 1, sectors, ts, rate);
+ mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}
/*
@@ -542,22 +548,24 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
unsigned int count, struct timespec *ts1,
struct timespec *ts2)
{
- unsigned int rate, sectors = bytes >> 9;
+ unsigned int rate, iops, sectors = bytes >> 9;
uint64_t tot = bytes * count;
struct timespec ts;
ts = timespec_sub(*ts2, *ts1);
rate = mmc_test_rate(tot, &ts);
+ iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
- "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
+ "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
+ "%u.%02u IOPS)\n",
mmc_hostname(test->card->host), count, sectors, count,
sectors >> 1, (sectors & 1 ? ".5" : ""),
(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
- rate / 1000, rate / 1024);
+ rate / 1000, rate / 1024, iops / 100, iops % 100);
- mmc_test_save_transfer_result(test, count, sectors, ts, rate);
+ mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
/*
@@ -1425,28 +1433,29 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)
}
/*
- * Initialize an area for testing large transfers. The size of the area is the
- * preferred erase size which is a good size for optimal transfer speed. Note
- * that is typically 4MiB for modern cards. The test area is set to the middle
- * of the card because cards may have different charateristics at the front
- * (for FAT file system optimization). Optionally, the area is erased (if the
- * card supports it) which may improve write performance. Optionally, the area
- * is filled with data for subsequent read tests.
+ * Initialize an area for testing large transfers. The test area is set to the
+ * middle of the card because cards may have different characteristics at the
+ * front (for FAT file system optimization). Optionally, the area is erased
+ * (if the card supports it) which may improve write performance. Optionally,
+ * the area is filled with data for subsequent read tests.
*/
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
struct mmc_test_area *t = &test->area;
- unsigned long min_sz = 64 * 1024;
+ unsigned long min_sz = 64 * 1024, sz;
int ret;
ret = mmc_test_set_blksize(test, 512);
if (ret)
return ret;
- if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
- t->max_sz = TEST_AREA_MAX_SIZE;
- else
- t->max_sz = (unsigned long)test->card->pref_erase << 9;
+ /* Make the test area size about 4MiB */
+ sz = (unsigned long)test->card->pref_erase << 9;
+ t->max_sz = sz;
+ while (t->max_sz < 4 * 1024 * 1024)
+ t->max_sz += sz;
+ while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
+ t->max_sz -= sz;
t->max_segs = test->card->host->max_segs;
t->max_seg_sz = test->card->host->max_seg_size;
@@ -1766,6 +1775,188 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
return 0;
}
+static unsigned int rnd_next = 1;
+
+static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
+{
+ uint64_t r;
+
+ rnd_next = rnd_next * 1103515245 + 12345;
+ r = (rnd_next >> 16) & 0x7fff;
+ return (r * rnd_cnt) >> 15;
+}
+
+static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
+ unsigned long sz)
+{
+ unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
+ unsigned int ssz;
+ struct timespec ts1, ts2, ts;
+ int ret;
+
+ ssz = sz >> 9;
+
+ rnd_addr = mmc_test_capacity(test->card) / 4;
+ range1 = rnd_addr / test->card->pref_erase;
+ range2 = range1 / ssz;
+
+ getnstimeofday(&ts1);
+ for (cnt = 0; cnt < UINT_MAX; cnt++) {
+ getnstimeofday(&ts2);
+ ts = timespec_sub(ts2, ts1);
+ if (ts.tv_sec >= 10)
+ break;
+ ea = mmc_test_rnd_num(range1);
+ if (ea == last_ea)
+ ea -= 1;
+ last_ea = ea;
+ dev_addr = rnd_addr + test->card->pref_erase * ea +
+ ssz * mmc_test_rnd_num(range2);
+ ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
+ if (ret)
+ return ret;
+ }
+ if (print)
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ return 0;
+}
+
+static int mmc_test_random_perf(struct mmc_test_card *test, int write)
+{
+ unsigned int next;
+ unsigned long sz;
+ int ret;
+
+ for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+ /*
+ * When writing, try to get more consistent results by running
+ * the test twice with exactly the same I/O but outputting the
+ * results only for the 2nd run.
+ */
+ if (write) {
+ next = rnd_next;
+ ret = mmc_test_rnd_perf(test, write, 0, sz);
+ if (ret)
+ return ret;
+ rnd_next = next;
+ }
+ ret = mmc_test_rnd_perf(test, write, 1, sz);
+ if (ret)
+ return ret;
+ }
+ sz = test->area.max_tfr;
+ if (write) {
+ next = rnd_next;
+ ret = mmc_test_rnd_perf(test, write, 0, sz);
+ if (ret)
+ return ret;
+ rnd_next = next;
+ }
+ return mmc_test_rnd_perf(test, write, 1, sz);
+}
+
+/*
+ * Random read performance by transfer size.
+ */
+static int mmc_test_random_read_perf(struct mmc_test_card *test)
+{
+ return mmc_test_random_perf(test, 0);
+}
+
+/*
+ * Random write performance by transfer size.
+ */
+static int mmc_test_random_write_perf(struct mmc_test_card *test)
+{
+ return mmc_test_random_perf(test, 1);
+}
+
+static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
+ unsigned int tot_sz, int max_scatter)
+{
+ unsigned int dev_addr, i, cnt, sz, ssz;
+ struct timespec ts1, ts2, ts;
+ int ret;
+
+ sz = test->area.max_tfr;
+ /*
+ * In the case of a maximally scattered transfer, the maximum transfer
+ * size is further limited by using PAGE_SIZE segments.
+ */
+ if (max_scatter) {
+ struct mmc_test_area *t = &test->area;
+ unsigned long max_tfr;
+
+ if (t->max_seg_sz >= PAGE_SIZE)
+ max_tfr = t->max_segs * PAGE_SIZE;
+ else
+ max_tfr = t->max_segs * t->max_seg_sz;
+ if (sz > max_tfr)
+ sz = max_tfr;
+ }
+
+ ssz = sz >> 9;
+ dev_addr = mmc_test_capacity(test->card) / 4;
+ if (tot_sz > dev_addr << 9)
+ tot_sz = dev_addr << 9;
+ cnt = tot_sz / sz;
+ dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
+
+ getnstimeofday(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_test_area_io(test, sz, dev_addr, write,
+ max_scatter, 0);
+ if (ret)
+ return ret;
+ dev_addr += ssz;
+ }
+ getnstimeofday(&ts2);
+
+ ts = timespec_sub(ts2, ts1);
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+
+ return 0;
+}
+
+static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
+{
+ int ret, i;
+
+ for (i = 0; i < 10; i++) {
+ ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < 5; i++) {
+ ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < 3; i++) {
+ ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Large sequential read performance.
+ */
+static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
+{
+ return mmc_test_large_seq_perf(test, 0);
+}
+
+/*
+ * Large sequential write performance.
+ */
+static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
+{
+ return mmc_test_large_seq_perf(test, 1);
+}
+
static const struct mmc_test_case mmc_test_cases[] = {
{
.name = "Basic write (no data verification)",
@@ -2005,6 +2196,34 @@ static const struct mmc_test_case mmc_test_cases[] = {
.cleanup = mmc_test_area_cleanup,
},
+ {
+ .name = "Random read performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_random_read_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Random write performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_random_write_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Large sequential read into scattered pages",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_large_seq_read_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Large sequential write from scattered pages",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_large_seq_write_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
};
static DEFINE_MUTEX(mmc_test_lock);
@@ -2148,11 +2367,11 @@ static int mtf_test_show(struct seq_file *sf, void *data)
seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
list_for_each_entry(tr, &gr->tr_lst, link) {
- seq_printf(sf, "%u %d %lu.%09lu %u\n",
+ seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
tr->count, tr->sectors,
(unsigned long)tr->ts.tv_sec,
(unsigned long)tr->ts.tv_nsec,
- tr->rate);
+ tr->rate, tr->iops / 100, tr->iops % 100);
}
}
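The IOPS figure threaded through mmc_test above is kept as a fixed-point value scaled by 100 (mmc_test_rate() is fed the operation count times 100), which is why every print path splits it into iops / 100 and iops % 100. A standalone sketch of the same arithmetic, with purely illustrative names and figures:

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative only: IOPS x 100, i.e. (ops * 100) / elapsed seconds,
	 * computed in nanoseconds so the fractional part is preserved. */
	static unsigned int iops_x100(uint64_t ops, uint64_t elapsed_ns)
	{
		if (!elapsed_ns)
			return 0;
		return (unsigned int)(ops * 100u * 1000000000ull / elapsed_ns);
	}

	int main(void)
	{
		/* e.g. 2560 transfers completed in 10.5 seconds */
		unsigned int iops = iops_x100(2560, 10500000000ull);

		printf("%u.%02u IOPS\n", iops / 100, iops % 100); /* 243.80 IOPS */
		return 0;
	}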
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 86b47911933..639501970b4 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_MMC) += mmc_core.o
mmc_core-y := core.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o \
sdio.o sdio_ops.o sdio_bus.o \
- sdio_cis.o sdio_io.o sdio_irq.o
+ sdio_cis.o sdio_io.o sdio_irq.o \
+ quirks.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 150b5f3cd40..1f453acc868 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -167,8 +167,6 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
WARN_ON(!host->claimed);
- led_trigger_event(host->led, LED_FULL);
-
mrq->cmd->error = 0;
mrq->cmd->mrq = mrq;
if (mrq->data) {
@@ -194,6 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
}
}
mmc_host_clk_ungate(host);
+ led_trigger_event(host->led, LED_FULL);
host->ops->request(host, mrq);
}
@@ -528,7 +527,14 @@ int mmc_try_claim_host(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_try_claim_host);
-static void mmc_do_release_host(struct mmc_host *host)
+/**
+ * mmc_do_release_host - release a claimed host
+ * @host: mmc host to release
+ *
+ * If you successfully claimed a host, this function will
+ * release it again.
+ */
+void mmc_do_release_host(struct mmc_host *host)
{
unsigned long flags;
@@ -543,6 +549,7 @@ static void mmc_do_release_host(struct mmc_host *host)
wake_up(&host->wq);
}
}
+EXPORT_SYMBOL(mmc_do_release_host);
void mmc_host_deeper_disable(struct work_struct *work)
{
@@ -1002,6 +1009,13 @@ static void mmc_power_off(struct mmc_host *host)
{
host->ios.clock = 0;
host->ios.vdd = 0;
+
+ /*
+ * Reset ocr mask to be the highest possible voltage supported for
+ * this mmc host. This value will be used at next power up.
+ */
+ host->ocr = 1 << (fls(host->ocr_avail) - 1);
+
if (!mmc_host_is_spi(host)) {
host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
host->ios.chip_select = MMC_CS_DONTCARE;
@@ -1495,6 +1509,12 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
mmc_hostname(host), __func__, host->f_init);
#endif
mmc_power_up(host);
+
+ /*
+	 * sdio_reset sends CMD52 to reset the card. Since we do not know
+	 * whether the card is being re-initialized, just send it. CMD52
+ * should be ignored by SD/eMMC cards.
+ */
sdio_reset(host);
mmc_go_idle(host);
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index ca1fdde29df..20b1c0831ea 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -61,6 +61,8 @@ int mmc_attach_mmc(struct mmc_host *host);
int mmc_attach_sd(struct mmc_host *host);
int mmc_attach_sdio(struct mmc_host *host);
+void mmc_fixup_device(struct mmc_card *card);
+
/* Module parameters */
extern int use_spi_crc;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 9ae3dbfba36..2b200c1cfbb 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -160,10 +160,7 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
* gate the clock, because there is somebody out there that may still
* be using it.
*/
- if (mmc_card_sdio(card))
- return false;
-
- return true;
+ return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
}
/**
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 16006ef153f..14e95f39a7b 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -302,6 +302,44 @@ static int mmc_read_ext_csd(struct mmc_card *card)
}
if (card->ext_csd.rev >= 4) {
+ /*
+		 * Enhanced area feature support -- check whether the eMMC
+		 * card has the enhanced area enabled. If so, export the
+		 * enhanced area offset and size to user space via sysfs.
+ */
+ if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
+ (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+ u8 hc_erase_grp_sz =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+ u8 hc_wp_grp_sz =
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+
+ card->ext_csd.enhanced_area_en = 1;
+ /*
+ * calculate the enhanced data area offset, in bytes
+ */
+ card->ext_csd.enhanced_area_offset =
+ (ext_csd[139] << 24) + (ext_csd[138] << 16) +
+ (ext_csd[137] << 8) + ext_csd[136];
+ if (mmc_card_blockaddr(card))
+ card->ext_csd.enhanced_area_offset <<= 9;
+ /*
+ * calculate the enhanced data area size, in kilobytes
+ */
+ card->ext_csd.enhanced_area_size =
+ (ext_csd[142] << 16) + (ext_csd[141] << 8) +
+ ext_csd[140];
+ card->ext_csd.enhanced_area_size *=
+ (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
+ card->ext_csd.enhanced_area_size <<= 9;
+ } else {
+ /*
+ * If the enhanced area is not enabled, disable these
+ * device attributes.
+ */
+ card->ext_csd.enhanced_area_offset = -EINVAL;
+ card->ext_csd.enhanced_area_size = -EINVAL;
+ }
card->ext_csd.sec_trim_mult =
ext_csd[EXT_CSD_SEC_TRIM_MULT];
card->ext_csd.sec_erase_mult =
@@ -336,6 +374,9 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
+MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
+ card->ext_csd.enhanced_area_offset);
+MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
static struct attribute *mmc_std_attrs[] = {
&dev_attr_cid.attr,
@@ -349,6 +390,8 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_serial.attr,
+ &dev_attr_enhanced_area_offset.attr,
+ &dev_attr_enhanced_area_size.attr,
NULL,
};
@@ -378,6 +421,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
int err, ddr = 0;
u32 cid[4];
unsigned int max_dtr;
+ u32 rocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
@@ -391,7 +435,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
mmc_go_idle(host);
/* The extra bit indicates that we support high capacity */
- err = mmc_send_op_cond(host, ocr | (1 << 30), NULL);
+ err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
if (err)
goto err;
@@ -479,11 +523,51 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_read_ext_csd(card);
if (err)
goto free_card;
+
+		/* If the card is using byte addressing, check whether it
+		 * actually requires sector addressing; <2GB cards may still
+		 * need it.  See section 8.1 of JEDEC Standard JESD84-A441:
+		 * bit 30 of the OCR register is set for sector addressing.
+ */
+ if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
+ mmc_card_set_blockaddr(card);
+
/* Erase size depends on CSD and Extended CSD */
mmc_set_erase_size(card);
}
/*
+	 * If enhanced_area_en is TRUE, the host needs to enable the
+	 * ERASE_GRP_DEF bit. This bit is lost after every reset or power off.
+ */
+ if (card->ext_csd.enhanced_area_en) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_ERASE_GROUP_DEF, 1);
+
+ if (err && err != -EBADMSG)
+ goto free_card;
+
+ if (err) {
+ err = 0;
+ /*
+			 * Just disable the enhanced area offset and size;
+			 * enabling ERASE_GROUP_DEF will be retried on the
+			 * next reinitialization.
+ */
+ card->ext_csd.enhanced_area_offset = -EINVAL;
+ card->ext_csd.enhanced_area_size = -EINVAL;
+ } else {
+ card->ext_csd.erase_group_def = 1;
+ /*
+			 * ERASE_GRP_DEF was enabled successfully.
+			 * This affects the erase size, so the erase
+			 * size needs to be recalculated here.
+ */
+ mmc_set_erase_size(card);
+ }
+ }
+
+ /*
* Activate high speed (if supported)
*/
if ((card->ext_csd.hs_max_dtr != 0) &&
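The EXT_CSD decoding added above assembles the enhanced user data area start address from bytes 136..139 (least significant byte first) and the size multiplier from bytes 140..142, where one size unit spans HC_ERASE_GRP_SIZE * HC_WP_GRP_SIZE * 512 KiB. A compact sketch of that arithmetic on a raw 512-byte EXT_CSD image (helper names are illustrative; the unit interpretation follows the hunk above):

	#include <stdint.h>

	/* Illustrative only: enhanced area start, in bytes.  Sector-addressed
	 * cards report the start address in 512-byte units, hence the shift. */
	static uint64_t enh_area_offset(const uint8_t *ext_csd, int sector_addressed)
	{
		uint64_t start = ((uint64_t)ext_csd[139] << 24) |
				 ((uint64_t)ext_csd[138] << 16) |
				 ((uint64_t)ext_csd[137] << 8) |
				  (uint64_t)ext_csd[136];

		return sector_addressed ? start << 9 : start;
	}

	/* Illustrative only: enhanced area size, in KiB. */
	static uint64_t enh_area_size_kib(const uint8_t *ext_csd,
					  unsigned int hc_erase_grp_sz,
					  unsigned int hc_wp_grp_sz)
	{
		uint64_t mult = ((uint64_t)ext_csd[142] << 16) |
				((uint64_t)ext_csd[141] << 8) |
				 (uint64_t)ext_csd[140];

		/* one multiplier unit spans erase_grp * wp_grp * 512 KiB */
		return (mult * hc_erase_grp_sz * hc_wp_grp_sz) << 9;
	}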
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
new file mode 100644
index 00000000000..11118b74eb2
--- /dev/null
+++ b/drivers/mmc/core/quirks.c
@@ -0,0 +1,84 @@
+/*
+ * This file contains work-arounds for many known sdio hardware
+ * bugs.
+ *
+ * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
+ * Inspired from pci fixup code:
+ * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mmc/card.h>
+#include <linux/mod_devicetable.h>
+
+/*
+ * The world is not perfect and supplies us with broken mmc/sdio devices.
+ * For at least some of these bugs we need a work-around.
+ */
+
+struct mmc_fixup {
+ u16 vendor, device; /* You can use SDIO_ANY_ID here of course */
+ void (*vendor_fixup)(struct mmc_card *card, int data);
+ int data;
+};
+
+/*
+ * This hook just adds a quirk unconditionally
+ */
+static void __maybe_unused add_quirk(struct mmc_card *card, int data)
+{
+ card->quirks |= data;
+}
+
+/*
+ * This hook just removes a quirk unconditionally
+ */
+static void __maybe_unused remove_quirk(struct mmc_card *card, int data)
+{
+ card->quirks &= ~data;
+}
+
+/*
+ * This hook just adds a quirk for all sdio devices
+ */
+static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
+{
+ if (mmc_card_sdio(card))
+ card->quirks |= data;
+}
+
+#ifndef SDIO_VENDOR_ID_TI
+#define SDIO_VENDOR_ID_TI 0x0097
+#endif
+
+#ifndef SDIO_DEVICE_ID_TI_WL1271
+#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+#endif
+
+static const struct mmc_fixup mmc_fixup_methods[] = {
+ /* by default sdio devices are considered CLK_GATING broken */
+ /* good cards will be whitelisted as they are tested */
+ { SDIO_ANY_ID, SDIO_ANY_ID,
+ add_quirk_for_sdio_devices, MMC_QUIRK_BROKEN_CLK_GATING },
+ { SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING },
+ { 0 }
+};
+
+void mmc_fixup_device(struct mmc_card *card)
+{
+ const struct mmc_fixup *f;
+
+ for (f = mmc_fixup_methods; f->vendor_fixup; f++) {
+ if ((f->vendor == card->cis.vendor
+ || f->vendor == (u16) SDIO_ANY_ID) &&
+ (f->device == card->cis.device
+ || f->device == (u16) SDIO_ANY_ID)) {
+ dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup);
+ f->vendor_fixup(card, f->data);
+ }
+ }
+}
+EXPORT_SYMBOL(mmc_fixup_device);
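Extending the table above is a matter of inserting entries before the { 0 } terminator; mmc_fixup_device() walks the list in order and applies every entry whose vendor/device pair matches, with SDIO_ANY_ID acting as a wildcard. A hypothetical extra table within the quirks.c shown above (the 0x1234/0x5678 IDs are placeholders, not a real card):

	static const struct mmc_fixup example_fixups[] = {
		/* blanket quirk: SDIO cards assumed broken for clock gating */
		{ SDIO_ANY_ID, SDIO_ANY_ID,
		  add_quirk_for_sdio_devices, MMC_QUIRK_BROKEN_CLK_GATING },
		/* hypothetical known-good card gets the quirk removed again */
		{ 0x1234, 0x5678,
		  remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING },
		{ 0 }	/* terminator: a NULL vendor_fixup stops the walk */
	};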
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d18c32bca99..6dac89fe053 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -21,6 +21,7 @@
#include "core.h"
#include "bus.h"
#include "mmc_ops.h"
+#include "sd.h"
#include "sd_ops.h"
static const unsigned int tran_exp[] = {
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index fbe1ea47502..db0f0b44d68 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -466,6 +466,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
card = oldcard;
}
+ mmc_fixup_device(card);
if (card->type == MMC_TYPE_SD_COMBO) {
err = mmc_sd_setup_card(host, card, oldcard != NULL);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index afe8c6fa166..1a21c6427a1 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -225,7 +225,7 @@ config MMC_OMAP
config MMC_OMAP_HS
tristate "TI OMAP High Speed Multimedia Card Interface support"
- depends on ARCH_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
+ depends on SOC_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
help
This selects the TI OMAP High Speed Multimedia card Interface.
If you have an OMAP2430 or OMAP3 board or OMAP4 board with a
@@ -311,7 +311,7 @@ config MMC_MSM
config MMC_MXC
tristate "Freescale i.MX2/3 Multimedia Card Interface support"
- depends on ARCH_MXC
+ depends on MACH_MX21 || MACH_MX27 || ARCH_MX31
help
This selects the Freescale i.MX2/3 Multimedia card Interface.
If you have a i.MX platform with a Multimedia Card slot,
@@ -319,6 +319,15 @@ config MMC_MXC
If unsure, say N.
+config MMC_MXS
+ tristate "Freescale MXS Multimedia Card Interface support"
+ depends on ARCH_MXS && MXS_DMA
+ help
+ This selects the Freescale SSP MMC controller found on MXS based
+ platforms like mx23/28.
+
+ If unsure, say N.
+
config MMC_TIFM_SD
tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)"
depends on EXPERIMENTAL && PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index e834fb223e9..30aa6867745 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
obj-$(CONFIG_MMC_PXA) += pxamci.o
obj-$(CONFIG_MMC_IMX) += imxmmc.o
obj-$(CONFIG_MMC_MXC) += mxcmmc.o
+obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index ad2a7a032cd..80bc9a5c25c 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -578,7 +578,8 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
struct mmc_data *data = host->data;
if (data)
- dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+ dma_unmap_sg(host->dma.chan->device->dev,
+ data->sg, data->sg_len,
((data->flags & MMC_DATA_WRITE)
? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}
@@ -588,7 +589,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
struct dma_chan *chan = host->data_chan;
if (chan) {
- chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(chan);
atmci_dma_cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
@@ -684,11 +685,11 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
else
direction = DMA_TO_DEVICE;
- sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction);
- if (sglen != data->sg_len)
- goto unmap_exit;
+ sglen = dma_map_sg(chan->device->dev, data->sg,
+ data->sg_len, direction);
+
desc = chan->device->device_prep_slave_sg(chan,
- data->sg, data->sg_len, direction,
+ data->sg, sglen, direction,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
goto unmap_exit;
@@ -699,7 +700,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
return 0;
unmap_exit:
- dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction);
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
return -ENOMEM;
}
@@ -709,8 +710,8 @@ static void atmci_submit_data(struct atmel_mci *host)
struct dma_async_tx_descriptor *desc = host->dma.data_desc;
if (chan) {
- desc->tx_submit(desc);
- chan->device->device_issue_pending(chan);
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
}
}
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 66b4ce587f4..ce2a47b71dd 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -205,7 +205,7 @@ static int cb710_wait_while_busy(struct cb710_slot *slot, uint8_t mask)
"WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n",
limit, mask, e, x);
#endif
- return 0;
+ return err;
}
static void cb710_mmc_set_transfer_size(struct cb710_slot *slot,
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 2fcc82577c1..5a614069cb0 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -32,6 +32,7 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
#include "dw_mmc.h"
@@ -562,7 +563,8 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
/* enable clock */
- mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE);
+ mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE |
+ SDMMC_CLKEN_LOW_PWR);
/* inform CIU */
mci_send_cmd(slot,
@@ -661,6 +663,7 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
+ u32 regs;
/* set default 1 bit mode */
slot->ctype = SDMMC_CTYPE_1BIT;
@@ -672,6 +675,16 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_BUS_WIDTH_4:
slot->ctype = SDMMC_CTYPE_4BIT;
break;
+ case MMC_BUS_WIDTH_8:
+ slot->ctype = SDMMC_CTYPE_8BIT;
+ break;
+ }
+
+ /* DDR mode set */
+ if (ios->ddr) {
+ regs = mci_readl(slot->host, UHS_REG);
+ regs |= (0x1 << slot->id) << 16;
+ mci_writel(slot->host, UHS_REG, regs);
}
if (ios->clock) {
@@ -717,7 +730,9 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
struct dw_mci_board *brd = slot->host->pdata;
/* Use platform get_cd function, else try onboard card detect */
- if (brd->get_cd)
+ if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
+ present = 1;
+ else if (brd->get_cd)
present = !brd->get_cd(slot->id);
else
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
@@ -1019,13 +1034,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
- unsigned int nbytes = 0, len, old_len, count = 0;
+ unsigned int nbytes = 0, len;
do {
len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
- if (count == 0)
- old_len = len;
-
if (offset + len <= sg->length) {
host->pull_data(host, (void *)(buf + offset), len);
@@ -1070,7 +1082,6 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
tasklet_schedule(&host->tasklet);
return;
}
- count++;
} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
host->pio_offset = offset;
@@ -1395,7 +1406,11 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
if (host->pdata->setpower)
host->pdata->setpower(id, 0);
- mmc->caps = 0;
+ if (host->pdata->caps)
+ mmc->caps = host->pdata->caps;
+ else
+ mmc->caps = 0;
+
if (host->pdata->get_bus_wd)
if (host->pdata->get_bus_wd(slot->id) >= 4)
mmc->caps |= MMC_CAP_4_BIT_DATA;
@@ -1426,6 +1441,13 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
}
#endif /* CONFIG_MMC_DW_IDMAC */
+ host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
+ if (IS_ERR(host->vmmc)) {
+ printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
+ host->vmmc = NULL;
+ } else
+ regulator_enable(host->vmmc);
+
if (dw_mci_get_cd(mmc))
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
else
@@ -1441,6 +1463,12 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
/* Card initially undetected */
slot->last_detect_state = 0;
+ /*
+	 * The card may have been plugged in prior to boot, so we
+	 * need to run the detect tasklet.
+ */
+ tasklet_schedule(&host->card_tasklet);
+
return 0;
}
@@ -1619,8 +1647,9 @@ static int dw_mci_probe(struct platform_device *pdev)
*/
fifo_size = mci_readl(host, FIFOTH);
fifo_size = (fifo_size >> 16) & 0x7ff;
- mci_writel(host, FIFOTH, ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
- ((fifo_size/2) << 0)));
+ host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
+ ((fifo_size/2) << 0));
+ mci_writel(host, FIFOTH, host->fifoth_val);
/* disable clock to CIU */
mci_writel(host, CLKENA, 0);
@@ -1683,6 +1712,12 @@ err_dmaunmap:
host->sg_cpu, host->sg_dma);
iounmap(host->regs);
+ if (host->vmmc) {
+ regulator_disable(host->vmmc);
+ regulator_put(host->vmmc);
+ }
+
+
err_freehost:
kfree(host);
return ret;
@@ -1714,6 +1749,11 @@ static int __exit dw_mci_remove(struct platform_device *pdev)
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
+ if (host->vmmc) {
+ regulator_disable(host->vmmc);
+ regulator_put(host->vmmc);
+ }
+
iounmap(host->regs);
kfree(host);
@@ -1729,6 +1769,9 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
int i, ret;
struct dw_mci *host = platform_get_drvdata(pdev);
+ if (host->vmmc)
+ regulator_enable(host->vmmc);
+
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
if (!slot)
@@ -1744,6 +1787,9 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
}
}
+ if (host->vmmc)
+ regulator_disable(host->vmmc);
+
return 0;
}
@@ -1752,6 +1798,23 @@ static int dw_mci_resume(struct platform_device *pdev)
int i, ret;
struct dw_mci *host = platform_get_drvdata(pdev);
+ if (host->dma_ops->init)
+ host->dma_ops->init(host);
+
+ if (!mci_wait_reset(&pdev->dev, host)) {
+ ret = -ENODEV;
+ return ret;
+ }
+
+ /* Restore the old value at FIFOTH register */
+ mci_writel(host, FIFOTH, host->fifoth_val);
+
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
+ SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+ DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
+ mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
+
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
if (!slot)
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 5dd55a75233..23c662af561 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -43,6 +43,7 @@
#define SDMMC_USRID 0x068
#define SDMMC_VERID 0x06c
#define SDMMC_HCON 0x070
+#define SDMMC_UHS_REG 0x074
#define SDMMC_BMOD 0x080
#define SDMMC_PLDMND 0x084
#define SDMMC_DBADDR 0x088
@@ -51,7 +52,6 @@
#define SDMMC_DSCADDR 0x094
#define SDMMC_BUFADDR 0x098
#define SDMMC_DATA 0x100
-#define SDMMC_DATA_ADR 0x100
/* shift bit field */
#define _SBF(f, v) ((v) << (f))
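The dw_mmc.c probe/resume changes above cache the computed FIFOTH value in host->fifoth_val so the FIFO watermarks survive a suspend/resume cycle. A sketch of how that value is packed (the field layout below follows the usual DesignWare MSHC register description and should be treated as an assumption, not something stated by the patch):

	#include <linux/types.h>

	/*
	 * Illustrative only: FIFOTH carries the DMA multiple-transaction
	 * size select in bits [30:28], the RX watermark in bits [27:16]
	 * and the TX watermark in bits [11:0].
	 */
	static u32 example_fifoth(unsigned int fifo_depth)
	{
		return (0x2 << 28) |			/* burst size select */
		       ((fifo_depth / 2 - 1) << 16) |	/* RX watermark */
		       ((fifo_depth / 2) << 0);		/* TX watermark */
	}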
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index fd877f633dd..2f7fc0c5146 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1516,21 +1516,17 @@ static int __devexit mmc_spi_remove(struct spi_device *spi)
return 0;
}
-#if defined(CONFIG_OF)
static struct of_device_id mmc_spi_of_match_table[] __devinitdata = {
{ .compatible = "mmc-spi-slot", },
{},
};
-#endif
static struct spi_driver mmc_spi_driver = {
.driver = {
.name = "mmc_spi",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
-#if defined(CONFIG_OF)
.of_match_table = mmc_spi_of_match_table,
-#endif
},
.probe = mmc_spi_probe,
.remove = __devexit_p(mmc_spi_remove),
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2d6de3e03e2..b4a7e4fba90 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2,7 +2,7 @@
* linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- * Copyright (C) 2010 ST-Ericsson AB.
+ * Copyright (C) 2010 ST-Ericsson SA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -25,8 +25,10 @@
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
-#include <linux/amba/mmci.h>
#include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/amba/mmci.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -66,6 +68,12 @@ static struct variant_data variant_arm = {
.datalength_bits = 16,
};
+static struct variant_data variant_arm_extended_fifo = {
+ .fifosize = 128 * 4,
+ .fifohalfsize = 64 * 4,
+ .datalength_bits = 16,
+};
+
static struct variant_data variant_u300 = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
@@ -142,9 +150,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
host->mrq = NULL;
host->cmd = NULL;
- if (mrq->data)
- mrq->data->bytes_xfered = host->data_xfered;
-
/*
* Need to drop the host lock here; mmc_request_done may call
* back into the driver...
@@ -189,6 +194,248 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface;
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+static void __devinit mmci_dma_setup(struct mmci_host *host)
+{
+ struct mmci_platform_data *plat = host->plat;
+ const char *rxname, *txname;
+ dma_cap_mask_t mask;
+
+ if (!plat || !plat->dma_filter) {
+ dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
+ return;
+ }
+
+ /* Try to acquire a generic DMA engine slave channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /*
+ * If only an RX channel is specified, the driver will
+	 * attempt to use it bidirectionally; however, if it is
+	 * specified but cannot be located, DMA will be disabled.
+ */
+ if (plat->dma_rx_param) {
+ host->dma_rx_channel = dma_request_channel(mask,
+ plat->dma_filter,
+ plat->dma_rx_param);
+		/* E.g. if no DMA hardware is present */
+ if (!host->dma_rx_channel)
+ dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
+ }
+
+ if (plat->dma_tx_param) {
+ host->dma_tx_channel = dma_request_channel(mask,
+ plat->dma_filter,
+ plat->dma_tx_param);
+ if (!host->dma_tx_channel)
+ dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
+ } else {
+ host->dma_tx_channel = host->dma_rx_channel;
+ }
+
+ if (host->dma_rx_channel)
+ rxname = dma_chan_name(host->dma_rx_channel);
+ else
+ rxname = "none";
+
+ if (host->dma_tx_channel)
+ txname = dma_chan_name(host->dma_tx_channel);
+ else
+ txname = "none";
+
+ dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
+ rxname, txname);
+
+ /*
+ * Limit the maximum segment size in any SG entry according to
+ * the parameters of the DMA engine device.
+ */
+ if (host->dma_tx_channel) {
+ struct device *dev = host->dma_tx_channel->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+ if (host->dma_rx_channel) {
+ struct device *dev = host->dma_rx_channel->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+}
+
+/*
+ * This is only used from __devinit or __devexit paths, so mark it
+ * inline so that it can be discarded along with them.
+ */
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+ struct mmci_platform_data *plat = host->plat;
+
+ if (host->dma_rx_channel)
+ dma_release_channel(host->dma_rx_channel);
+ if (host->dma_tx_channel && plat->dma_tx_param)
+ dma_release_channel(host->dma_tx_channel);
+ host->dma_rx_channel = host->dma_tx_channel = NULL;
+}
+
+static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+ struct dma_chan *chan = host->dma_current;
+ enum dma_data_direction dir;
+ u32 status;
+ int i;
+
+ /* Wait up to 1ms for the DMA to complete */
+ for (i = 0; ; i++) {
+ status = readl(host->base + MMCISTATUS);
+ if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
+ break;
+ udelay(10);
+ }
+
+ /*
+ * Check to see whether we still have some data left in the FIFO -
+ * this catches DMA controllers which are unable to monitor the
+ * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
+ * contiguous buffers. On TX, we'll get a FIFO underrun error.
+ */
+ if (status & MCI_RXDATAAVLBLMASK) {
+ dmaengine_terminate_all(chan);
+ if (!data->error)
+ data->error = -EIO;
+ }
+
+ if (data->flags & MMC_DATA_WRITE) {
+ dir = DMA_TO_DEVICE;
+ } else {
+ dir = DMA_FROM_DEVICE;
+ }
+
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+
+ /*
+ * Use of DMA with scatter-gather is impossible.
+ * Give up with DMA and switch back to PIO mode.
+ */
+ if (status & MCI_RXDATAAVLBLMASK) {
+ dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
+ mmci_dma_release(host);
+ }
+}
+
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ dmaengine_terminate_all(host->dma_current);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ struct variant_data *variant = host->variant;
+ struct dma_slave_config conf = {
+ .src_addr = host->phybase + MMCIFIFO,
+ .dst_addr = host->phybase + MMCIFIFO,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ };
+ struct mmc_data *data = host->data;
+ struct dma_chan *chan;
+ struct dma_device *device;
+ struct dma_async_tx_descriptor *desc;
+ int nr_sg;
+
+ host->dma_current = NULL;
+
+ if (data->flags & MMC_DATA_READ) {
+ conf.direction = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ conf.direction = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+ /* If there's no DMA channel, fall back to PIO */
+ if (!chan)
+ return -EINVAL;
+
+ /* If less than or equal to the fifo size, don't bother with DMA */
+ if (host->size <= variant->fifosize)
+ return -EINVAL;
+
+ device = chan->device;
+ nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ if (nr_sg == 0)
+ return -EINVAL;
+
+ dmaengine_slave_config(chan, &conf);
+ desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
+ conf.direction, DMA_CTRL_ACK);
+ if (!desc)
+ goto unmap_exit;
+
+ /* Okay, go for it. */
+ host->dma_current = chan;
+
+ dev_vdbg(mmc_dev(host->mmc),
+ "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+ data->sg_len, data->blksz, data->blocks, data->flags);
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+
+ datactrl |= MCI_DPSM_DMAENABLE;
+
+ /* Trigger the DMA transfer */
+ writel(datactrl, host->base + MMCIDATACTRL);
+
+ /*
+ * Let the MMCI say when the data is ended and it's time
+ * to fire next DMA request. When that happens, MMCI will
+ * call mmci_data_end()
+ */
+ writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+ host->base + MMCIMASK0);
+ return 0;
+
+unmap_exit:
+ dmaengine_terminate_all(chan);
+ dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ return -ENOMEM;
+}
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void mmci_dma_setup(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline void mmci_dma_data_error(struct mmci_host *host)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ return -ENOSYS;
+}
+#endif
+
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
struct variant_data *variant = host->variant;
@@ -202,9 +449,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
host->data = data;
host->size = data->blksz * data->blocks;
- host->data_xfered = 0;
-
- mmci_init_sg(host, data);
+ data->bytes_xfered = 0;
clks = (unsigned long long)data->timeout_ns * host->cclk;
do_div(clks, 1000000000UL);
@@ -219,15 +464,29 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
BUG_ON(1 << blksz_bits != data->blksz);
datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
- if (data->flags & MMC_DATA_READ) {
+
+ if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
+
+ /*
+	 * Attempt to use DMA operation mode; if this
+	 * should fail, fall back to PIO mode.
+ */
+ if (!mmci_dma_start_data(host, datactrl))
+ return;
+
+ /* IRQ mode, map the SG list for CPU reading/writing */
+ mmci_init_sg(host, data);
+
+ if (data->flags & MMC_DATA_READ) {
irqmask = MCI_RXFIFOHALFFULLMASK;
/*
- * If we have less than a FIFOSIZE of bytes to transfer,
- * trigger a PIO interrupt as soon as any data is available.
+ * If we have less than the fifo 'half-full' threshold to
+ * transfer, trigger a PIO interrupt as soon as any data
+ * is available.
*/
- if (host->size < variant->fifosize)
+ if (host->size < variant->fifohalfsize)
irqmask |= MCI_RXDATAAVLBLMASK;
} else {
/*
@@ -283,49 +542,51 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
u32 remain, success;
- /* Calculate how far we are into the transfer */
+ /* Terminate the DMA transfer */
+ if (dma_inprogress(host))
+ mmci_dma_data_error(host);
+
+ /*
+ * Calculate how far we are into the transfer. Note that
+ * the data counter gives the number of bytes transferred
+ * on the MMC bus, not on the host side. On reads, this
+ * can be as much as a FIFO-worth of data ahead. This
+ * matters for FIFO overruns only.
+ */
remain = readl(host->base + MMCIDATACNT);
success = data->blksz * data->blocks - remain;
- dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
+ dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
+ status, success);
if (status & MCI_DATACRCFAIL) {
/* Last block was not successful */
- host->data_xfered = round_down(success - 1, data->blksz);
+ success -= 1;
data->error = -EILSEQ;
} else if (status & MCI_DATATIMEOUT) {
- host->data_xfered = round_down(success, data->blksz);
data->error = -ETIMEDOUT;
- } else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
- host->data_xfered = round_down(success, data->blksz);
+ } else if (status & MCI_TXUNDERRUN) {
+ data->error = -EIO;
+ } else if (status & MCI_RXOVERRUN) {
+ if (success > host->variant->fifosize)
+ success -= host->variant->fifosize;
+ else
+ success = 0;
data->error = -EIO;
}
-
- /*
- * We hit an error condition. Ensure that any data
- * partially written to a page is properly coherent.
- */
- if (data->flags & MMC_DATA_READ) {
- struct sg_mapping_iter *sg_miter = &host->sg_miter;
- unsigned long flags;
-
- local_irq_save(flags);
- if (sg_miter_next(sg_miter)) {
- flush_dcache_page(sg_miter->page);
- sg_miter_stop(sg_miter);
- }
- local_irq_restore(flags);
- }
+ data->bytes_xfered = round_down(success, data->blksz);
}
if (status & MCI_DATABLOCKEND)
dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
if (status & MCI_DATAEND || data->error) {
+ if (dma_inprogress(host))
+ mmci_dma_unmap(host, data);
mmci_stop_data(host);
if (!data->error)
/* The error clause is handled above, success! */
- host->data_xfered += data->blksz * data->blocks;
+ data->bytes_xfered = data->blksz * data->blocks;
if (!data->stop) {
mmci_request_end(host, data->mrq);
@@ -498,9 +759,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
if (remain)
break;
- if (status & MCI_RXACTIVE)
- flush_dcache_page(sg_miter->page);
-
status = readl(base + MMCISTATUS);
} while (1);
@@ -509,10 +767,10 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
local_irq_restore(flags);
/*
- * If we're nearing the end of the read, switch to
- * "any data available" mode.
+ * If we have less than the fifo 'half-full' threshold to transfer,
+ * trigger a PIO interrupt as soon as any data is available.
*/
- if (status & MCI_RXACTIVE && host->size < variant->fifosize)
+ if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
/*
@@ -713,7 +971,8 @@ static const struct mmc_host_ops mmci_ops = {
.get_cd = mmci_get_cd,
};
-static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit mmci_probe(struct amba_device *dev,
+ const struct amba_id *id)
{
struct mmci_platform_data *plat = dev->dev.platform_data;
struct variant_data *variant = id->data;
@@ -776,6 +1035,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
host->mclk);
}
+ host->phybase = dev->res.start;
host->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!host->base) {
ret = -ENOMEM;
@@ -903,9 +1163,12 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
amba_set_drvdata(dev, mmc);
- dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
- mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
- (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
+ dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
+ mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
+ amba_rev(dev), (unsigned long long)dev->res.start,
+ dev->irq[0], dev->irq[1]);
+
+ mmci_dma_setup(host);
mmc_add_host(mmc);
@@ -952,6 +1215,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
+ mmci_dma_release(host);
free_irq(dev->irq[0], host);
if (!host->singleirq)
free_irq(dev->irq[1], host);
@@ -1019,10 +1283,15 @@ static int mmci_resume(struct amba_device *dev)
static struct amba_id mmci_ids[] = {
{
.id = 0x00041180,
- .mask = 0x000fffff,
+ .mask = 0xff0fffff,
.data = &variant_arm,
},
{
+ .id = 0x01041180,
+ .mask = 0xff0fffff,
+ .data = &variant_arm_extended_fifo,
+ },
+ {
.id = 0x00041181,
.mask = 0x000fffff,
.data = &variant_arm,
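The widened .mask values above (0xff0fffff instead of 0x000fffff) let the PrimeCell revision nibble float while pinning the configuration byte, which is what tells the plain ARM variant apart from the new 128-word-FIFO one. The AMBA bus matches a table entry when the masked peripheral ID equals the entry's id; a sketch of that test, assuming the usual (periphid & mask) == id rule:

	#include <linux/amba/bus.h>

	/* Illustrative only: how an amba_id table entry matches a peripheral ID */
	static int example_amba_id_match(unsigned int periphid,
					 const struct amba_id *entry)
	{
		return (periphid & entry->mask) == entry->id;
	}

With these masks a PL180 reporting peripheral ID 0x00241180 (revision 2, standard configuration) still binds to variant_arm, for example, while 0x01041180 binds only to the extended-FIFO entry.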
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index c1df7b82d36..ec9a7bc6d0d 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -148,8 +148,10 @@
struct clk;
struct variant_data;
+struct dma_chan;
struct mmci_host {
+ phys_addr_t phybase;
void __iomem *base;
struct mmc_request *mrq;
struct mmc_command *cmd;
@@ -161,8 +163,6 @@ struct mmci_host {
int gpio_cd_irq;
bool singleirq;
- unsigned int data_xfered;
-
spinlock_t lock;
unsigned int mclk;
@@ -181,5 +181,16 @@ struct mmci_host {
struct sg_mapping_iter sg_miter;
unsigned int size;
struct regulator *vcc;
+
+#ifdef CONFIG_DMA_ENGINE
+ /* DMA stuff */
+ struct dma_chan *dma_current;
+ struct dma_chan *dma_rx_channel;
+ struct dma_chan *dma_tx_channel;
+
+#define dma_inprogress(host) ((host)->dma_current)
+#else
+#define dma_inprogress(host) (0)
+#endif
};
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 153ab977a01..a4c865a5286 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -36,6 +36,7 @@
#include <linux/io.h>
#include <linux/memory.h>
#include <linux/gfp.h>
+#include <linux/gpio.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
@@ -266,14 +267,6 @@ msmsdcc_dma_complete_tlet(unsigned long data)
dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
host->dma.dir);
- if (host->curr.user_pages) {
- struct scatterlist *sg = host->dma.sg;
- int i;
-
- for (i = 0; i < host->dma.num_ents; i++)
- flush_dcache_page(sg_page(sg++));
- }
-
host->dma.sg = NULL;
host->dma.busy = 0;
@@ -941,6 +934,38 @@ msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, flags);
}
+static void msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable)
+{
+ struct msm_mmc_gpio_data *curr;
+ int i, rc = 0;
+
+ if (!host->plat->gpio_data && host->gpio_config_status == enable)
+ return;
+
+ curr = host->plat->gpio_data;
+ for (i = 0; i < curr->size; i++) {
+ if (enable) {
+ rc = gpio_request(curr->gpio[i].no,
+ curr->gpio[i].name);
+ if (rc) {
+ pr_err("%s: gpio_request(%d, %s) failed %d\n",
+ mmc_hostname(host->mmc),
+ curr->gpio[i].no,
+ curr->gpio[i].name, rc);
+ goto free_gpios;
+ }
+ } else {
+ gpio_free(curr->gpio[i].no);
+ }
+ }
+ host->gpio_config_status = enable;
+ return;
+
+free_gpios:
+ for (; i >= 0; i--)
+ gpio_free(curr->gpio[i].no);
+}
+
static void
msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
@@ -953,6 +978,8 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
msmsdcc_enable_clocks(host);
+ spin_unlock_irqrestore(&host->lock, flags);
+
if (ios->clock) {
if (ios->clock != host->clk_rate) {
rc = clk_set_rate(host->clk, ios->clock);
@@ -979,9 +1006,11 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_OFF:
+ msmsdcc_setup_gpio(host, false);
break;
case MMC_POWER_UP:
pwr |= MCI_PWR_UP;
+ msmsdcc_setup_gpio(host, true);
break;
case MMC_POWER_ON:
pwr |= MCI_PWR_ON;
@@ -998,9 +1027,10 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
msmsdcc_writel(host, pwr, MMCIPOWER);
}
#if BUSCLK_PWRSAVE
+ spin_lock_irqsave(&host->lock, flags);
msmsdcc_disable_clocks(host, 1);
-#endif
spin_unlock_irqrestore(&host->lock, flags);
+#endif
}
static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index 939557af266..42d7bbc977c 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -243,6 +243,7 @@ struct msmsdcc_host {
unsigned int cmd_datactrl;
struct mmc_command *cmd_cmd;
u32 cmd_c;
+ bool gpio_config_status;
bool prog_scan;
bool prog_enable;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 4428594261c..cc20e025932 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -32,16 +32,14 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/sizes.h>
#include <mach/mmc.h>
-#ifdef CONFIG_ARCH_MX2
-#include <mach/dma-mx1-mx2.h>
-#define HAS_DMA
-#endif
+#include <mach/dma.h>
#define DRIVER_NAME "mxc-mmc"
@@ -118,7 +116,8 @@ struct mxcmci_host {
void __iomem *base;
int irq;
int detect_irq;
- int dma;
+ struct dma_chan *dma;
+ struct dma_async_tx_descriptor *desc;
int do_dma;
int default_irq_mask;
int use_sdio;
@@ -129,7 +128,6 @@ struct mxcmci_host {
struct mmc_command *cmd;
struct mmc_data *data;
- unsigned int dma_nents;
unsigned int datasize;
unsigned int dma_dir;
@@ -144,6 +142,11 @@ struct mxcmci_host {
spinlock_t lock;
struct regulator *vcc;
+
+ int burstlen;
+ int dmareq;
+ struct dma_slave_config dma_slave_config;
+ struct imx_dma_data dma_data;
};
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
@@ -206,17 +209,16 @@ static void mxcmci_softreset(struct mxcmci_host *host)
writew(0xff, host->base + MMC_REG_RES_TO);
}
+static int mxcmci_setup_dma(struct mmc_host *mmc);
static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
{
unsigned int nob = data->blocks;
unsigned int blksz = data->blksz;
unsigned int datasize = nob * blksz;
-#ifdef HAS_DMA
struct scatterlist *sg;
- int i;
- int ret;
-#endif
+ int i, nents;
+
if (data->flags & MMC_DATA_STREAM)
nob = 0xffff;
@@ -227,7 +229,9 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
writew(blksz, host->base + MMC_REG_BLK_LEN);
host->datasize = datasize;
-#ifdef HAS_DMA
+ if (!mxcmci_use_dma(host))
+ return 0;
+
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->offset & 3 || sg->length & 3) {
host->do_dma = 0;
@@ -235,34 +239,30 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
}
}
- if (data->flags & MMC_DATA_READ) {
+ if (data->flags & MMC_DATA_READ)
host->dma_dir = DMA_FROM_DEVICE;
- host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, host->dma_dir);
-
- ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
- datasize,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_READ);
- } else {
+ else
host->dma_dir = DMA_TO_DEVICE;
- host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, host->dma_dir);
- ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
- datasize,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_WRITE);
- }
+ nents = dma_map_sg(host->dma->device->dev, data->sg,
+ data->sg_len, host->dma_dir);
+ if (nents != data->sg_len)
+ return -EINVAL;
+
+ host->desc = host->dma->device->device_prep_slave_sg(host->dma,
+ data->sg, data->sg_len, host->dma_dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (ret) {
- dev_err(mmc_dev(host->mmc), "failed to setup DMA : %d\n", ret);
- return ret;
+ if (!host->desc) {
+ dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
+ host->dma_dir);
+ host->do_dma = 0;
+ return 0; /* Fall back to PIO */
}
wmb();
- imx_dma_enable(host->dma);
-#endif /* HAS_DMA */
+ dmaengine_submit(host->desc);
+
return 0;
}
@@ -337,13 +337,11 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
struct mmc_data *data = host->data;
int data_error;
-#ifdef HAS_DMA
if (mxcmci_use_dma(host)) {
- imx_dma_disable(host->dma);
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
+ dmaengine_terminate_all(host->dma);
+ dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
host->dma_dir);
}
-#endif
if (stat & STATUS_ERR_MASK) {
dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
@@ -545,7 +543,6 @@ static void mxcmci_datawork(struct work_struct *work)
}
}
-#ifdef HAS_DMA
static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
{
struct mmc_data *data = host->data;
@@ -568,7 +565,6 @@ static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
mxcmci_finish_request(host, host->req);
}
}
-#endif /* HAS_DMA */
static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
{
@@ -606,12 +602,10 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
spin_unlock_irqrestore(&host->lock, flags);
-#ifdef HAS_DMA
if (mxcmci_use_dma(host) &&
(stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
host->base + MMC_REG_STATUS);
-#endif
if (sdio_irq) {
writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
@@ -621,14 +615,14 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
if (stat & STATUS_END_CMD_RESP)
mxcmci_cmd_done(host, stat);
-#ifdef HAS_DMA
if (mxcmci_use_dma(host) &&
(stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
mxcmci_data_done(host, stat);
-#endif
+
if (host->default_irq_mask &&
(stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+
return IRQ_HANDLED;
}
@@ -642,9 +636,10 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
host->req = req;
host->cmdat &= ~CMD_DAT_CONT_INIT;
-#ifdef HAS_DMA
- host->do_dma = 1;
-#endif
+
+ if (host->dma)
+ host->do_dma = 1;
+
if (req->data) {
error = mxcmci_setup_data(host, req->data);
if (error) {
@@ -660,6 +655,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
}
error = mxcmci_start_cmd(host, req->cmd, cmdat);
+
out:
if (error)
mxcmci_finish_request(host, req);
@@ -698,22 +694,46 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
prescaler, divider, clk_in, clk_ios);
}
+static int mxcmci_setup_dma(struct mmc_host *mmc)
+{
+ struct mxcmci_host *host = mmc_priv(mmc);
+ struct dma_slave_config *config = &host->dma_slave_config;
+
+ config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
+ config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
+ config->dst_addr_width = 4;
+ config->src_addr_width = 4;
+ config->dst_maxburst = host->burstlen;
+ config->src_maxburst = host->burstlen;
+
+ return dmaengine_slave_config(host->dma, config);
+}
+
static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mxcmci_host *host = mmc_priv(mmc);
-#ifdef HAS_DMA
- unsigned int blen;
+ int burstlen, ret;
+
/*
* use burstlen of 64 in 4 bit mode (--> reg value 0)
* use burstlen of 16 in 1 bit mode (--> reg value 16)
*/
if (ios->bus_width == MMC_BUS_WIDTH_4)
- blen = 0;
+ burstlen = 64;
else
- blen = 16;
+ burstlen = 16;
+
+ if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
+ host->burstlen = burstlen;
+ ret = mxcmci_setup_dma(mmc);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to config DMA channel. Falling back to PIO\n");
+ dma_release_channel(host->dma);
+ host->do_dma = 0;
+ }
+ }
- imx_dma_config_burstlen(host->dma, blen);
-#endif
if (ios->bus_width == MMC_BUS_WIDTH_4)
host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
else
@@ -794,6 +814,18 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
host->caps |= MMC_CAP_4_BIT_DATA;
}
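+/*
+ * dma_request_channel() filter: accept any general purpose i.MX DMA channel
+ * and attach the imx_dma_data (SDHC peripheral type, request line, priority)
+ * through chan->private so the DMA engine driver can set the channel up.
+ */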
+static bool filter(struct dma_chan *chan, void *param)
+{
+ struct mxcmci_host *host = param;
+
+ if (!imx_dma_is_general_purpose(chan))
+ return false;
+
+ chan->private = &host->dma_data;
+
+ return true;
+}
+
static const struct mmc_host_ops mxcmci_ops = {
.request = mxcmci_request,
.set_ios = mxcmci_set_ios,
@@ -808,6 +840,7 @@ static int mxcmci_probe(struct platform_device *pdev)
struct mxcmci_host *host = NULL;
struct resource *iores, *r;
int ret = 0, irq;
+ dma_cap_mask_t mask;
printk(KERN_INFO "i.MX SDHC driver\n");
@@ -883,29 +916,23 @@ static int mxcmci_probe(struct platform_device *pdev)
writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR);
-#ifdef HAS_DMA
- host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
- if (host->dma < 0) {
- dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
- ret = -EBUSY;
- goto out_clk_put;
- }
-
r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!r) {
- ret = -EINVAL;
- goto out_free_dma;
+ if (r) {
+ host->dmareq = r->start;
+ host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
+ host->dma_data.priority = DMA_PRIO_LOW;
+ host->dma_data.dma_request = host->dmareq;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma = dma_request_channel(mask, filter, host);
+ if (host->dma)
+ mmc->max_seg_size = dma_get_max_seg_size(
+ host->dma->device->dev);
}
- ret = imx_dma_config_channel(host->dma,
- IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_FIFO,
- IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
- r->start, 0);
- if (ret) {
- dev_err(mmc_dev(host->mmc), "failed to config DMA channel\n");
- goto out_free_dma;
- }
-#endif
+ if (!host->dma)
+ dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
+
INIT_WORK(&host->datawork, mxcmci_datawork);
ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host);
@@ -928,9 +955,8 @@ static int mxcmci_probe(struct platform_device *pdev)
out_free_irq:
free_irq(host->irq, host);
out_free_dma:
-#ifdef HAS_DMA
- imx_dma_free(host->dma);
-#endif
+ if (host->dma)
+ dma_release_channel(host->dma);
out_clk_put:
clk_disable(host->clk);
clk_put(host->clk);
@@ -960,9 +986,10 @@ static int mxcmci_remove(struct platform_device *pdev)
free_irq(host->irq, host);
iounmap(host->base);
-#ifdef HAS_DMA
- imx_dma_free(host->dma);
-#endif
+
+ if (host->dma)
+ dma_release_channel(host->dma);
+
clk_disable(host->clk);
clk_put(host->clk);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
new file mode 100644
index 00000000000..99d39a6a103
--- /dev/null
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -0,0 +1,874 @@
+/*
+ * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
+ * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
+ *
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/highmem.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+
+#include <mach/mxs.h>
+#include <mach/common.h>
+#include <mach/dma.h>
+#include <mach/mmc.h>
+
+#define DRIVER_NAME "mxs-mmc"
+
+/* card detect polling timeout */
+#define MXS_MMC_DETECT_TIMEOUT (HZ/2)
+
+#define SSP_VERSION_LATEST 4
+#define ssp_is_old() (host->version < SSP_VERSION_LATEST)
+
+/* SSP registers */
+#define HW_SSP_CTRL0 0x000
+#define BM_SSP_CTRL0_RUN (1 << 29)
+#define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28)
+#define BM_SSP_CTRL0_IGNORE_CRC (1 << 26)
+#define BM_SSP_CTRL0_READ (1 << 25)
+#define BM_SSP_CTRL0_DATA_XFER (1 << 24)
+#define BP_SSP_CTRL0_BUS_WIDTH (22)
+#define BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22)
+#define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21)
+#define BM_SSP_CTRL0_LONG_RESP (1 << 19)
+#define BM_SSP_CTRL0_GET_RESP (1 << 17)
+#define BM_SSP_CTRL0_ENABLE (1 << 16)
+#define BP_SSP_CTRL0_XFER_COUNT (0)
+#define BM_SSP_CTRL0_XFER_COUNT (0xffff)
+#define HW_SSP_CMD0 0x010
+#define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25)
+#define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22)
+#define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21)
+#define BM_SSP_CMD0_APPEND_8CYC (1 << 20)
+#define BP_SSP_CMD0_BLOCK_SIZE (16)
+#define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16)
+#define BP_SSP_CMD0_BLOCK_COUNT (8)
+#define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8)
+#define BP_SSP_CMD0_CMD (0)
+#define BM_SSP_CMD0_CMD (0xff)
+#define HW_SSP_CMD1 0x020
+#define HW_SSP_XFER_SIZE 0x030
+#define HW_SSP_BLOCK_SIZE 0x040
+#define BP_SSP_BLOCK_SIZE_BLOCK_COUNT (4)
+#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4)
+#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0)
+#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf)
+#define HW_SSP_TIMING (ssp_is_old() ? 0x050 : 0x070)
+#define BP_SSP_TIMING_TIMEOUT (16)
+#define BM_SSP_TIMING_TIMEOUT (0xffff << 16)
+#define BP_SSP_TIMING_CLOCK_DIVIDE (8)
+#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8)
+#define BP_SSP_TIMING_CLOCK_RATE (0)
+#define BM_SSP_TIMING_CLOCK_RATE (0xff)
+#define HW_SSP_CTRL1 (ssp_is_old() ? 0x060 : 0x080)
+#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31)
+#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30)
+#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29)
+#define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28)
+#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27)
+#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26)
+#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25)
+#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24)
+#define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23)
+#define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22)
+#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21)
+#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20)
+#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17)
+#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16)
+#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15)
+#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14)
+#define BM_SSP_CTRL1_DMA_ENABLE (1 << 13)
+#define BM_SSP_CTRL1_POLARITY (1 << 9)
+#define BP_SSP_CTRL1_WORD_LENGTH (4)
+#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4)
+#define BP_SSP_CTRL1_SSP_MODE (0)
+#define BM_SSP_CTRL1_SSP_MODE (0xf)
+#define HW_SSP_SDRESP0 (ssp_is_old() ? 0x080 : 0x0a0)
+#define HW_SSP_SDRESP1 (ssp_is_old() ? 0x090 : 0x0b0)
+#define HW_SSP_SDRESP2 (ssp_is_old() ? 0x0a0 : 0x0c0)
+#define HW_SSP_SDRESP3 (ssp_is_old() ? 0x0b0 : 0x0d0)
+#define HW_SSP_STATUS (ssp_is_old() ? 0x0c0 : 0x100)
+#define BM_SSP_STATUS_CARD_DETECT (1 << 28)
+#define BM_SSP_STATUS_SDIO_IRQ (1 << 17)
+#define HW_SSP_VERSION (cpu_is_mx23() ? 0x110 : 0x130)
+#define BP_SSP_VERSION_MAJOR (24)
+
+#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field)
+
+#define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \
+ BM_SSP_CTRL1_RESP_ERR_IRQ | \
+ BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
+ BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
+ BM_SSP_CTRL1_DATA_CRC_IRQ | \
+ BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
+ BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
+ BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
+
+#define SSP_PIO_NUM 3
+
+struct mxs_mmc_host {
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ void __iomem *base;
+ int irq;
+ struct resource *res;
+ struct resource *dma_res;
+ struct clk *clk;
+ unsigned int clk_rate;
+
+ struct dma_chan *dmach;
+ struct mxs_dma_data dma_data;
+ unsigned int dma_dir;
+ u32 ssp_pio_words[SSP_PIO_NUM];
+
+ unsigned int version;
+ unsigned char bus_width;
+ spinlock_t lock;
+ int sdio_irq_en;
+};
+
+static int mxs_mmc_get_ro(struct mmc_host *mmc)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ struct mxs_mmc_platform_data *pdata =
+ mmc_dev(host->mmc)->platform_data;
+
+ if (!pdata)
+ return -EFAULT;
+
+ if (!gpio_is_valid(pdata->wp_gpio))
+ return -EINVAL;
+
+ return gpio_get_value(pdata->wp_gpio);
+}
+
+static int mxs_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+
+ return !(readl(host->base + HW_SSP_STATUS) &
+ BM_SSP_STATUS_CARD_DETECT);
+}
+
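+/*
+ * Soft-reset the SSP block and reprogram CTRL0/CTRL1 and the timing
+ * register; the SDIO IRQ check/enable bits are restored afterwards because
+ * the block reset clears them.
+ */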
+static void mxs_mmc_reset(struct mxs_mmc_host *host)
+{
+ u32 ctrl0, ctrl1;
+
+ mxs_reset_block(host->base);
+
+ ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
+ ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
+ BF_SSP(0x7, CTRL1_WORD_LENGTH) |
+ BM_SSP_CTRL1_DMA_ENABLE |
+ BM_SSP_CTRL1_POLARITY |
+ BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
+ BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
+ BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
+ BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
+ BM_SSP_CTRL1_RESP_ERR_IRQ_EN;
+
+ writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
+ BF_SSP(2, TIMING_CLOCK_DIVIDE) |
+ BF_SSP(0, TIMING_CLOCK_RATE),
+ host->base + HW_SSP_TIMING);
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
+ }
+
+ writel(ctrl0, host->base + HW_SSP_CTRL0);
+ writel(ctrl1, host->base + HW_SSP_CTRL1);
+}
+
+static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
+ struct mmc_command *cmd);
+
+static void mxs_mmc_request_done(struct mxs_mmc_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ struct mmc_data *data = host->data;
+ struct mmc_request *mrq = host->mrq;
+
+ if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
+ if (mmc_resp_type(cmd) & MMC_RSP_136) {
+ cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0);
+ cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1);
+ cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2);
+ cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3);
+ } else {
+ cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0);
+ }
+ }
+
+ if (data) {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, host->dma_dir);
+ /*
+ * If there was an error on any block, we mark all
+ * data blocks as being in error.
+ */
+ if (!data->error)
+ data->bytes_xfered = data->blocks * data->blksz;
+ else
+ data->bytes_xfered = 0;
+
+ host->data = NULL;
+ if (mrq->stop) {
+ mxs_mmc_start_cmd(host, mrq->stop);
+ return;
+ }
+ }
+
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void mxs_mmc_dma_irq_callback(void *param)
+{
+ struct mxs_mmc_host *host = param;
+
+ mxs_mmc_request_done(host);
+}
+
+static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
+{
+ struct mxs_mmc_host *host = dev_id;
+ struct mmc_command *cmd = host->cmd;
+ struct mmc_data *data = host->data;
+ u32 stat;
+
+ spin_lock(&host->lock);
+
+ stat = readl(host->base + HW_SSP_CTRL1);
+ writel(stat & MXS_MMC_IRQ_BITS,
+ host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+
+ if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
+ mmc_signal_sdio_irq(host->mmc);
+
+ spin_unlock(&host->lock);
+
+ if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
+ cmd->error = -ETIMEDOUT;
+ else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
+ cmd->error = -EIO;
+
+ if (data) {
+ if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
+ BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
+ data->error = -ETIMEDOUT;
+ else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
+ data->error = -EILSEQ;
+ else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
+ BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
+ data->error = -EIO;
+ }
+
+ return IRQ_HANDLED;
+}
+
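+/*
+ * Build a descriptor either for the data scatterlist or, when there is no
+ * data phase yet, for the three SSP PIO words (CTRL0/CMD0/CMD1) handed to
+ * the same slave_sg call. The mxs DMA driver presumably treats a DMA_NONE
+ * "direction" as a request to write those words to the SSP PIO registers,
+ * and the final argument is used as an append-to-chain flag so the command
+ * setup runs immediately before the data transfer.
+ */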
+static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
+ struct mxs_mmc_host *host, unsigned int append)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct mmc_data *data = host->data;
+ struct scatterlist *sgl;
+ unsigned int sg_len;
+
+ if (data) {
+ /* data */
+ dma_map_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, host->dma_dir);
+ sgl = data->sg;
+ sg_len = data->sg_len;
+ } else {
+ /* pio */
+ sgl = (struct scatterlist *) host->ssp_pio_words;
+ sg_len = SSP_PIO_NUM;
+ }
+
+ desc = host->dmach->device->device_prep_slave_sg(host->dmach,
+ sgl, sg_len, host->dma_dir, append);
+ if (desc) {
+ desc->callback = mxs_mmc_dma_irq_callback;
+ desc->callback_param = host;
+ } else {
+ if (data)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, host->dma_dir);
+ }
+
+ return desc;
+}
+
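+/*
+ * Broadcast commands carry no response and no data, so only the PIO
+ * descriptor with CTRL0/CMD0/CMD1 is queued; APPEND_8CYC adds the extra
+ * clock cycles after the command.
+ */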
+static void mxs_mmc_bc(struct mxs_mmc_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ struct dma_async_tx_descriptor *desc;
+ u32 ctrl0, cmd0, cmd1;
+
+ ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
+ cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
+ cmd1 = cmd->arg;
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+ }
+
+ host->ssp_pio_words[0] = ctrl0;
+ host->ssp_pio_words[1] = cmd0;
+ host->ssp_pio_words[2] = cmd1;
+ host->dma_dir = DMA_NONE;
+ desc = mxs_mmc_prep_dma(host, 0);
+ if (!desc)
+ goto out;
+
+ dmaengine_submit(desc);
+ return;
+
+out:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: failed to prep dma\n", __func__);
+}
+
+static void mxs_mmc_ac(struct mxs_mmc_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ struct dma_async_tx_descriptor *desc;
+ u32 ignore_crc, get_resp, long_resp;
+ u32 ctrl0, cmd0, cmd1;
+
+ ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
+ 0 : BM_SSP_CTRL0_IGNORE_CRC;
+ get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
+ BM_SSP_CTRL0_GET_RESP : 0;
+ long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
+ BM_SSP_CTRL0_LONG_RESP : 0;
+
+ ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
+ cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
+ cmd1 = cmd->arg;
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+ }
+
+ host->ssp_pio_words[0] = ctrl0;
+ host->ssp_pio_words[1] = cmd0;
+ host->ssp_pio_words[2] = cmd1;
+ host->dma_dir = DMA_NONE;
+ desc = mxs_mmc_prep_dma(host, 0);
+ if (!desc)
+ goto out;
+
+ dmaengine_submit(desc);
+ return;
+
+out:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: failed to prep dma\n", __func__);
+}
+
+static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
+{
+ const unsigned int ssp_timeout_mul = 4096;
+ /*
+ * Calculate ticks in ms since ns are large numbers
+ * and might overflow
+ */
+ const unsigned int clock_per_ms = clock_rate / 1000;
+ const unsigned int ms = ns / 1000;
+ const unsigned int ticks = ms * clock_per_ms;
+ const unsigned int ssp_ticks = ticks / ssp_timeout_mul;
+
+ WARN_ON(ssp_ticks == 0);
+ return ssp_ticks;
+}
+
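+/*
+ * Addressed data transfer commands: on old SSP versions the transfer size
+ * and block count are packed into CTRL0/CMD0, on newer ones they go into
+ * the separate XFER_SIZE/BLOCK_SIZE registers. The command PIO descriptor
+ * is queued first and the data scatterlist is appended to it.
+ */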
+static void mxs_mmc_adtc(struct mxs_mmc_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ struct mmc_data *data = cmd->data;
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sgl = data->sg, *sg;
+ unsigned int sg_len = data->sg_len;
+ int i;
+
+ unsigned short dma_data_dir, timeout;
+ unsigned int data_size = 0, log2_blksz;
+ unsigned int blocks = data->blocks;
+
+ u32 ignore_crc, get_resp, long_resp, read;
+ u32 ctrl0, cmd0, cmd1, val;
+
+ ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
+ 0 : BM_SSP_CTRL0_IGNORE_CRC;
+ get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
+ BM_SSP_CTRL0_GET_RESP : 0;
+ long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
+ BM_SSP_CTRL0_LONG_RESP : 0;
+
+ if (data->flags & MMC_DATA_WRITE) {
+ dma_data_dir = DMA_TO_DEVICE;
+ read = 0;
+ } else {
+ dma_data_dir = DMA_FROM_DEVICE;
+ read = BM_SSP_CTRL0_READ;
+ }
+
+ ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
+ ignore_crc | get_resp | long_resp |
+ BM_SSP_CTRL0_DATA_XFER | read |
+ BM_SSP_CTRL0_WAIT_FOR_IRQ |
+ BM_SSP_CTRL0_ENABLE;
+
+ cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
+
+ /* get logarithm to base 2 of block size for setting register */
+ log2_blksz = ilog2(data->blksz);
+
+ /*
+ * handle the case where the total size of data->sg is not equal
+ * to blocks x blksz
+ */
+ for_each_sg(sgl, sg, sg_len, i)
+ data_size += sg->length;
+
+ if (data_size != data->blocks * data->blksz)
+ blocks = 1;
+
+ /* xfer count, block size and count are programmed differently on old vs. new SSP */
+ if (ssp_is_old()) {
+ ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
+ cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
+ BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
+ } else {
+ writel(data_size, host->base + HW_SSP_XFER_SIZE);
+ writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
+ BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
+ host->base + HW_SSP_BLOCK_SIZE);
+ }
+
+ if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
+ (cmd->opcode == SD_IO_RW_EXTENDED))
+ cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
+
+ cmd1 = cmd->arg;
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+ }
+
+ /* set the timeout count */
+ timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
+ val = readl(host->base + HW_SSP_TIMING);
+ val &= ~(BM_SSP_TIMING_TIMEOUT);
+ val |= BF_SSP(timeout, TIMING_TIMEOUT);
+ writel(val, host->base + HW_SSP_TIMING);
+
+ /* pio */
+ host->ssp_pio_words[0] = ctrl0;
+ host->ssp_pio_words[1] = cmd0;
+ host->ssp_pio_words[2] = cmd1;
+ host->dma_dir = DMA_NONE;
+ desc = mxs_mmc_prep_dma(host, 0);
+ if (!desc)
+ goto out;
+
+ /* append data sg */
+ WARN_ON(host->data != NULL);
+ host->data = data;
+ host->dma_dir = dma_data_dir;
+ desc = mxs_mmc_prep_dma(host, 1);
+ if (!desc)
+ goto out;
+
+ dmaengine_submit(desc);
+ return;
+out:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: failed to prep dma\n", __func__);
+}
+
+static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ host->cmd = cmd;
+
+ switch (mmc_cmd_type(cmd)) {
+ case MMC_CMD_BC:
+ mxs_mmc_bc(host);
+ break;
+ case MMC_CMD_BCR:
+ mxs_mmc_ac(host);
+ break;
+ case MMC_CMD_AC:
+ mxs_mmc_ac(host);
+ break;
+ case MMC_CMD_ADTC:
+ mxs_mmc_adtc(host);
+ break;
+ default:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: unknown MMC command\n", __func__);
+ break;
+ }
+}
+
+static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+
+ WARN_ON(host->mrq != NULL);
+ host->mrq = mrq;
+ mxs_mmc_start_cmd(host, mrq->cmd);
+}
+
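+/*
+ * The SSP bit clock is ssp_clk / (CLOCK_DIVIDE * (CLOCK_RATE + 1)). The loop
+ * below picks an even CLOCK_DIVIDE (div1) and derives div2 so that it fits
+ * the 8-bit CLOCK_RATE field. For example, assuming a 96 MHz SSP clock and a
+ * requested 400 kHz card clock: div1 = 2, div2 = 96000000 / 400000 / 2 = 120,
+ * so the resulting bit rate is 96000000 / 2 / 120 = 400 kHz.
+ */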
+static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
+{
+ unsigned int ssp_rate, bit_rate;
+ u32 div1, div2;
+ u32 val;
+
+ ssp_rate = clk_get_rate(host->clk);
+
+ for (div1 = 2; div1 < 254; div1 += 2) {
+ div2 = ssp_rate / rate / div1;
+ if (div2 < 0x100)
+ break;
+ }
+
+ if (div1 >= 254) {
+ dev_err(mmc_dev(host->mmc),
+ "%s: cannot set clock to %d\n", __func__, rate);
+ return;
+ }
+
+ if (div2 == 0)
+ bit_rate = ssp_rate / div1;
+ else
+ bit_rate = ssp_rate / div1 / div2;
+
+ val = readl(host->base + HW_SSP_TIMING);
+ val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
+ val |= BF_SSP(div1, TIMING_CLOCK_DIVIDE);
+ val |= BF_SSP(div2 - 1, TIMING_CLOCK_RATE);
+ writel(val, host->base + HW_SSP_TIMING);
+
+ host->clk_rate = bit_rate;
+
+ dev_dbg(mmc_dev(host->mmc),
+ "%s: div1 %d, div2 %d, ssp %d, bit %d, rate %d\n",
+ __func__, div1, div2, ssp_rate, bit_rate, rate);
+}
+
+static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+
+ if (ios->bus_width == MMC_BUS_WIDTH_8)
+ host->bus_width = 2;
+ else if (ios->bus_width == MMC_BUS_WIDTH_4)
+ host->bus_width = 1;
+ else
+ host->bus_width = 0;
+
+ if (ios->clock)
+ mxs_mmc_set_clk_rate(host, ios->clock);
+}
+
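+/*
+ * The SDIO IRQ bits are toggled through the MXS_SET_ADDR/MXS_CLR_ADDR
+ * register aliases, avoiding a read-modify-write of CTRL0/CTRL1 while
+ * the host lock is held.
+ */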
+static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->sdio_irq_en = enable;
+
+ if (enable) {
+ writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
+ host->base + HW_SSP_CTRL0 + MXS_SET_ADDR);
+ writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
+ host->base + HW_SSP_CTRL1 + MXS_SET_ADDR);
+
+ if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
+ mmc_signal_sdio_irq(host->mmc);
+
+ } else {
+ writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
+ host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR);
+ writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
+ host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static const struct mmc_host_ops mxs_mmc_ops = {
+ .request = mxs_mmc_request,
+ .get_ro = mxs_mmc_get_ro,
+ .get_cd = mxs_mmc_get_cd,
+ .set_ios = mxs_mmc_set_ios,
+ .enable_sdio_irq = mxs_mmc_enable_sdio_irq,
+};
+
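+/*
+ * Channel filter for dma_request_channel(): pick the APBH DMA channel whose
+ * id matches the platform IORESOURCE_DMA resource and pass the per-channel
+ * mxs_dma_data (carrying the DMA IRQ) through chan->private.
+ */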
+static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct mxs_mmc_host *host = param;
+
+ if (!mxs_dma_is_apbh(chan))
+ return false;
+
+ if (chan->chan_id != host->dma_res->start)
+ return false;
+
+ chan->private = &host->dma_data;
+
+ return true;
+}
+
+static int mxs_mmc_probe(struct platform_device *pdev)
+{
+ struct mxs_mmc_host *host;
+ struct mmc_host *mmc;
+ struct resource *iores, *dmares, *r;
+ struct mxs_mmc_platform_data *pdata;
+ int ret = 0, irq_err, irq_dma;
+ dma_cap_mask_t mask;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ irq_err = platform_get_irq(pdev, 0);
+ irq_dma = platform_get_irq(pdev, 1);
+ if (!iores || !dmares || irq_err < 0 || irq_dma < 0)
+ return -EINVAL;
+
+ r = request_mem_region(iores->start, resource_size(iores), pdev->name);
+ if (!r)
+ return -EBUSY;
+
+ mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto out_release_mem;
+ }
+
+ host = mmc_priv(mmc);
+ host->base = ioremap(r->start, resource_size(r));
+ if (!host->base) {
+ ret = -ENOMEM;
+ goto out_mmc_free;
+ }
+
+ /* only the major version matters */
+ host->version = readl(host->base + HW_SSP_VERSION) >>
+ BP_SSP_VERSION_MAJOR;
+
+ host->mmc = mmc;
+ host->res = r;
+ host->dma_res = dmares;
+ host->irq = irq_err;
+ host->sdio_irq_en = 0;
+
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto out_iounmap;
+ }
+ clk_enable(host->clk);
+
+ mxs_mmc_reset(host);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_data.chan_irq = irq_dma;
+ host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host);
+ if (!host->dmach) {
+ dev_err(mmc_dev(host->mmc),
+ "%s: failed to request dma\n", __func__);
+ goto out_clk_put;
+ }
+
+ /* set mmc core parameters */
+ mmc->ops = &mxs_mmc_ops;
+ mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
+
+ pdata = mmc_dev(host->mmc)->platform_data;
+ if (pdata) {
+ if (pdata->flags & SLOTF_8_BIT_CAPABLE)
+ mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
+ if (pdata->flags & SLOTF_4_BIT_CAPABLE)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+ }
+
+ mmc->f_min = 400000;
+ mmc->f_max = 288000000;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+ mmc->max_segs = 52;
+ mmc->max_blk_size = 1 << 0xf;
+ mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff;
+ mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff;
+ mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
+
+ platform_set_drvdata(pdev, mmc);
+
+ ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host);
+ if (ret)
+ goto out_free_dma;
+
+ spin_lock_init(&host->lock);
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto out_free_irq;
+
+ dev_info(mmc_dev(host->mmc), "initialized\n");
+
+ return 0;
+
+out_free_irq:
+ free_irq(host->irq, host);
+out_free_dma:
+ if (host->dmach)
+ dma_release_channel(host->dmach);
+out_clk_put:
+ clk_disable(host->clk);
+ clk_put(host->clk);
+out_iounmap:
+ iounmap(host->base);
+out_mmc_free:
+ mmc_free_host(mmc);
+out_release_mem:
+ release_mem_region(iores->start, resource_size(iores));
+ return ret;
+}
+
+static int mxs_mmc_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ struct resource *res = host->res;
+
+ mmc_remove_host(mmc);
+
+ free_irq(host->irq, host);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (host->dmach)
+ dma_release_channel(host->dmach);
+
+ clk_disable(host->clk);
+ clk_put(host->clk);
+
+ iounmap(host->base);
+
+ mmc_free_host(mmc);
+
+ release_mem_region(res->start, resource_size(res));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mxs_mmc_suspend(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ int ret = 0;
+
+ ret = mmc_suspend_host(mmc);
+
+ clk_disable(host->clk);
+
+ return ret;
+}
+
+static int mxs_mmc_resume(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ int ret = 0;
+
+ clk_enable(host->clk);
+
+ ret = mmc_resume_host(mmc);
+
+ return ret;
+}
+
+static const struct dev_pm_ops mxs_mmc_pm_ops = {
+ .suspend = mxs_mmc_suspend,
+ .resume = mxs_mmc_resume,
+};
+#endif
+
+static struct platform_driver mxs_mmc_driver = {
+ .probe = mxs_mmc_probe,
+ .remove = mxs_mmc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &mxs_mmc_pm_ops,
+#endif
+ },
+};
+
+static int __init mxs_mmc_init(void)
+{
+ return platform_driver_register(&mxs_mmc_driver);
+}
+
+static void __exit mxs_mmc_exit(void)
+{
+ platform_driver_unregister(&mxs_mmc_driver);
+}
+
+module_init(mxs_mmc_init);
+module_exit(mxs_mmc_exit);
+
+MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 078fdf11af0..158c0ee53b2 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -118,7 +118,7 @@
#define MMC_TIMEOUT_MS 20
#define OMAP_MMC_MASTER_CLOCK 96000000
-#define DRIVER_NAME "mmci-omap-hs"
+#define DRIVER_NAME "omap_hsmmc"
/* Timeouts for entering power saving states on inactivity, msec */
#define OMAP_MMC_DISABLED_TIMEOUT 100
@@ -260,7 +260,7 @@ static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on,
return ret;
}
-static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
+static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on,
int vdd)
{
struct omap_hsmmc_host *host =
@@ -316,6 +316,12 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
return ret;
}
+static int omap_hsmmc_4_set_power(struct device *dev, int slot, int power_on,
+ int vdd)
+{
+ return 0;
+}
+
static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
int vdd, int cardsleep)
{
@@ -326,7 +332,7 @@ static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
return regulator_set_mode(host->vcc, mode);
}
-static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
+static int omap_hsmmc_235_set_sleep(struct device *dev, int slot, int sleep,
int vdd, int cardsleep)
{
struct omap_hsmmc_host *host =
@@ -365,6 +371,12 @@ static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
return regulator_enable(host->vcc_aux);
}
+static int omap_hsmmc_4_set_sleep(struct device *dev, int slot, int sleep,
+ int vdd, int cardsleep)
+{
+ return 0;
+}
+
static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
struct regulator *reg;
@@ -379,10 +391,14 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
break;
case OMAP_MMC2_DEVID:
case OMAP_MMC3_DEVID:
+ case OMAP_MMC5_DEVID:
/* Off-chip level shifting, or none */
- mmc_slot(host).set_power = omap_hsmmc_23_set_power;
- mmc_slot(host).set_sleep = omap_hsmmc_23_set_sleep;
+ mmc_slot(host).set_power = omap_hsmmc_235_set_power;
+ mmc_slot(host).set_sleep = omap_hsmmc_235_set_sleep;
break;
+ case OMAP_MMC4_DEVID:
+ mmc_slot(host).set_power = omap_hsmmc_4_set_power;
+ mmc_slot(host).set_sleep = omap_hsmmc_4_set_sleep;
+ break;
default:
pr_err("MMC%d configuration not supported!\n", host->id);
return -EINVAL;
@@ -1555,7 +1571,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
break;
}
- if (host->id == OMAP_MMC1_DEVID) {
+ if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
/* Only MMC1 can interface at 3V without some flavor
* of external transceiver; but they all handle 1.8V.
*/
@@ -1647,7 +1663,7 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
u32 hctl, capa, value;
/* Only MMC1 supports 3.0V */
- if (host->id == OMAP_MMC1_DEVID) {
+ if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
hctl = SDVS30;
capa = VS30 | VS18;
} else {
@@ -2101,14 +2117,14 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
/* we start off in DISABLED state */
host->dpm_state = DISABLED;
- if (mmc_host_enable(host->mmc) != 0) {
+ if (clk_enable(host->iclk) != 0) {
clk_put(host->iclk);
clk_put(host->fclk);
goto err1;
}
- if (clk_enable(host->iclk) != 0) {
- mmc_host_disable(host->mmc);
+ if (mmc_host_enable(host->mmc) != 0) {
+ clk_disable(host->iclk);
clk_put(host->iclk);
clk_put(host->fclk);
goto err1;
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 9b82910b9db..3b524856797 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -15,9 +15,11 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/gpio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdhci-pltfm.h>
#include <mach/hardware.h>
+#include <mach/esdhc.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
@@ -30,6 +32,39 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
writel(((readl(base) & ~(mask << shift)) | (val << shift)), base);
}
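+/*
+ * On i.MX25/35 the controller's native card-detect line is unusable, so the
+ * present-state read is patched up from an optional card-detect GPIO and the
+ * card insert/remove interrupt enables are masked out in the write hook.
+ */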
+static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
+{
+ /* fake CARD_PRESENT flag on mx25/35 */
+ u32 val = readl(host->ioaddr + reg);
+
+ if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+ struct esdhc_platform_data *boarddata =
+ host->mmc->parent->platform_data;
+
+ if (boarddata && gpio_is_valid(boarddata->cd_gpio)
+ && gpio_get_value(boarddata->cd_gpio))
+ /* no card, if a valid gpio says so... */
+ val &= SDHCI_CARD_PRESENT;
+ else
+ /* ... in all other cases assume card is present */
+ val |= SDHCI_CARD_PRESENT;
+ }
+
+ return val;
+}
+
+static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
+{
+ if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE))
+ /*
+ * these interrupts won't work with a custom card_detect gpio
+ * (this only applies to mx25/35)
+ */
+ val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+
+ writel(val, host->ioaddr + reg);
+}
+
static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
{
if (unlikely(reg == SDHCI_HOST_VERSION))
@@ -100,10 +135,39 @@ static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
return clk_get_rate(pltfm_host->clk) / 256 / 16;
}
+static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
+{
+ struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
+
+ if (boarddata && gpio_is_valid(boarddata->wp_gpio))
+ return gpio_get_value(boarddata->wp_gpio);
+ else
+ return -ENOSYS;
+}
+
+static struct sdhci_ops sdhci_esdhc_ops = {
+ .read_w = esdhc_readw_le,
+ .write_w = esdhc_writew_le,
+ .write_b = esdhc_writeb_le,
+ .set_clock = esdhc_set_clock,
+ .get_max_clock = esdhc_pltfm_get_max_clock,
+ .get_min_clock = esdhc_pltfm_get_min_clock,
+};
+
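+/* card-detect GPIO interrupt: let the sdhci core rescan via its card tasklet */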
+static irqreturn_t cd_irq(int irq, void *data)
+{
+ struct sdhci_host *sdhost = (struct sdhci_host *)data;
+
+ tasklet_schedule(&sdhost->card_tasklet);
+ return IRQ_HANDLED;
+}
+
static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
struct clk *clk;
+ int err;
clk = clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(clk)) {
@@ -116,32 +180,78 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
if (cpu_is_mx35() || cpu_is_mx51())
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
- /* Fix errata ENGcm07207 which is present on i.MX25 and i.MX35 */
- if (cpu_is_mx25() || cpu_is_mx35())
+ if (cpu_is_mx25() || cpu_is_mx35()) {
+ /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
+ /* write_protect can't be routed to controller, use gpio */
+ sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro;
+ }
+
+ if (boarddata) {
+ err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP");
+ if (err) {
+ dev_warn(mmc_dev(host->mmc),
+ "no write-protect pin available!\n");
+ boarddata->wp_gpio = err;
+ }
+
+ err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD");
+ if (err) {
+ dev_warn(mmc_dev(host->mmc),
+ "no card-detect pin available!\n");
+ goto no_card_detect_pin;
+ }
+
+ /* i.MX5x has issues to be researched */
+ if (!cpu_is_mx25() && !cpu_is_mx35())
+ goto not_supported;
+
+ err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ mmc_hostname(host->mmc), host);
+ if (err) {
+ dev_warn(mmc_dev(host->mmc), "request irq error\n");
+ goto no_card_detect_irq;
+ }
+
+ sdhci_esdhc_ops.write_l = esdhc_writel_le;
+ sdhci_esdhc_ops.read_l = esdhc_readl_le;
+ /* Now we have a working card_detect again */
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ }
+
+ return 0;
+ no_card_detect_irq:
+ gpio_free(boarddata->cd_gpio);
+ no_card_detect_pin:
+ boarddata->cd_gpio = err;
+ not_supported:
return 0;
}
static void esdhc_pltfm_exit(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
+
+ if (boarddata && gpio_is_valid(boarddata->wp_gpio))
+ gpio_free(boarddata->wp_gpio);
+
+ if (boarddata && gpio_is_valid(boarddata->cd_gpio)) {
+ gpio_free(boarddata->cd_gpio);
+
+ if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION))
+ free_irq(gpio_to_irq(boarddata->cd_gpio), host);
+ }
clk_disable(pltfm_host->clk);
clk_put(pltfm_host->clk);
}
-static struct sdhci_ops sdhci_esdhc_ops = {
- .read_w = esdhc_readw_le,
- .write_w = esdhc_writew_le,
- .write_b = esdhc_writeb_le,
- .set_clock = esdhc_set_clock,
- .get_max_clock = esdhc_pltfm_get_max_clock,
- .get_min_clock = esdhc_pltfm_get_min_clock,
-};
-
struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA,
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA
+ | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
/* ADMA has issues. Might be fixable */
.ops = &sdhci_esdhc_ops,
.init = esdhc_pltfm_init,
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index afaf1bc4913..c55aae828aa 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -19,7 +19,6 @@
*/
#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
- SDHCI_QUIRK_BROKEN_CARD_DETECTION | \
SDHCI_QUIRK_NO_BUSY_IRQ | \
SDHCI_QUIRK_NONSTANDARD_CLOCK | \
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index fcd0e1fcba4..08161f690ae 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -73,7 +73,8 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
}
struct sdhci_of_data sdhci_esdhc = {
- .quirks = ESDHC_DEFAULT_QUIRKS,
+ /* card detection could be handled via GPIO */
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
.ops = {
.read_l = sdhci_be32bs_readl,
.read_w = esdhc_readw,
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index b1b2fc04dd7..603035a4593 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -908,9 +908,6 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
{
struct sdhci_pci_slot *slot;
struct sdhci_host *host;
-
- resource_size_t addr;
-
int ret;
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
@@ -957,7 +954,6 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
goto free;
}
- addr = pci_resource_start(pdev, bar);
host->ioaddr = pci_ioremap_bar(pdev, bar);
if (!host->ioaddr) {
dev_err(&pdev->dev, "failed to remap registers\n");
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 5309ab95aad..69e3ee321eb 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -499,6 +499,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
* SDHCI block, or a missing configuration that needs to be set. */
host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
+ /* This host supports the Auto CMD12 */
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+
if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 4823ee94a63..f7e1f964395 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -169,7 +169,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate wp gpio\n");
- goto out_cd;
+ goto out_irq;
}
tegra_gpio_enable(plat->wp_gpio);
gpio_direction_input(plat->wp_gpio);
@@ -195,6 +195,9 @@ out_wp:
gpio_free(plat->wp_gpio);
}
+out_irq:
+ if (gpio_is_valid(plat->cd_gpio))
+ free_irq(gpio_to_irq(plat->cd_gpio), host);
out_cd:
if (gpio_is_valid(plat->cd_gpio)) {
tegra_gpio_disable(plat->cd_gpio);
@@ -225,6 +228,7 @@ static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
}
if (gpio_is_valid(plat->cd_gpio)) {
+ free_irq(gpio_to_irq(plat->cd_gpio), host);
tegra_gpio_disable(plat->cd_gpio);
gpio_free(plat->cd_gpio);
}
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 12884c27017..af97015a2fc 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -169,7 +169,7 @@ struct sh_mmcif_host {
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
struct completion dma_complete;
- unsigned int dma_sglen;
+ bool dma_active;
};
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
@@ -194,10 +194,12 @@ static void mmcif_dma_complete(void *arg)
return;
if (host->data->flags & MMC_DATA_READ)
- dma_unmap_sg(&host->pd->dev, host->data->sg, host->dma_sglen,
+ dma_unmap_sg(host->chan_rx->device->dev,
+ host->data->sg, host->data->sg_len,
DMA_FROM_DEVICE);
else
- dma_unmap_sg(&host->pd->dev, host->data->sg, host->dma_sglen,
+ dma_unmap_sg(host->chan_tx->device->dev,
+ host->data->sg, host->data->sg_len,
DMA_TO_DEVICE);
complete(&host->dma_complete);
@@ -211,9 +213,10 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
dma_cookie_t cookie = -EINVAL;
int ret;
- ret = dma_map_sg(&host->pd->dev, sg, host->data->sg_len, DMA_FROM_DEVICE);
+ ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+ DMA_FROM_DEVICE);
if (ret > 0) {
- host->dma_sglen = ret;
+ host->dma_active = true;
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
@@ -221,14 +224,9 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
if (desc) {
desc->callback = mmcif_dma_complete;
desc->callback_param = host;
- cookie = desc->tx_submit(desc);
- if (cookie < 0) {
- desc = NULL;
- ret = cookie;
- } else {
- sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
- chan->device->device_issue_pending(chan);
- }
+ cookie = dmaengine_submit(desc);
+ sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
+ dma_async_issue_pending(chan);
}
dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
__func__, host->data->sg_len, ret, cookie);
@@ -238,7 +236,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
if (ret >= 0)
ret = -EIO;
host->chan_rx = NULL;
- host->dma_sglen = 0;
+ host->dma_active = false;
dma_release_channel(chan);
/* Free the Tx channel too */
chan = host->chan_tx;
@@ -263,9 +261,10 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
dma_cookie_t cookie = -EINVAL;
int ret;
- ret = dma_map_sg(&host->pd->dev, sg, host->data->sg_len, DMA_TO_DEVICE);
+ ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+ DMA_TO_DEVICE);
if (ret > 0) {
- host->dma_sglen = ret;
+ host->dma_active = true;
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
@@ -273,14 +272,9 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
if (desc) {
desc->callback = mmcif_dma_complete;
desc->callback_param = host;
- cookie = desc->tx_submit(desc);
- if (cookie < 0) {
- desc = NULL;
- ret = cookie;
- } else {
- sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
- chan->device->device_issue_pending(chan);
- }
+ cookie = dmaengine_submit(desc);
+ sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
+ dma_async_issue_pending(chan);
}
dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
__func__, host->data->sg_len, ret, cookie);
@@ -290,7 +284,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
if (ret >= 0)
ret = -EIO;
host->chan_tx = NULL;
- host->dma_sglen = 0;
+ host->dma_active = false;
dma_release_channel(chan);
/* Free the Rx channel too */
chan = host->chan_rx;
@@ -317,7 +311,7 @@ static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
struct sh_mmcif_plat_data *pdata)
{
- host->dma_sglen = 0;
+ host->dma_active = false;
/* We can only either use DMA for both Tx and Rx or not use it at all */
if (pdata->dma) {
@@ -364,7 +358,7 @@ static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
dma_release_channel(chan);
}
- host->dma_sglen = 0;
+ host->dma_active = false;
}
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
@@ -753,7 +747,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
}
sh_mmcif_get_response(host, cmd);
if (host->data) {
- if (!host->dma_sglen) {
+ if (!host->dma_active) {
ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
} else {
long time =
@@ -765,7 +759,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
ret = time;
sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
- host->dma_sglen = 0;
+ host->dma_active = false;
}
if (ret < 0)
mrq->data->bytes_xfered = 0;
@@ -850,15 +844,15 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct sh_mmcif_host *host = mmc_priv(mmc);
struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
- if (ios->power_mode == MMC_POWER_OFF) {
+ if (ios->power_mode == MMC_POWER_UP) {
+ if (p->set_pwr)
+ p->set_pwr(host->pd, ios->power_mode);
+ } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
/* clock stop */
sh_mmcif_clock_control(host, 0);
- if (p->down_pwr)
+ if (ios->power_mode == MMC_POWER_OFF && p->down_pwr)
p->down_pwr(host->pd);
return;
- } else if (ios->power_mode == MMC_POWER_UP) {
- if (p->set_pwr)
- p->set_pwr(host->pd, ios->power_mode);
}
if (ios->clock)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e3c6ef20839..ab1adeabdd2 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -152,7 +152,6 @@ struct tmio_mmc_host {
struct tasklet_struct dma_complete;
struct tasklet_struct dma_issue;
#ifdef CONFIG_TMIO_MMC_DMA
- unsigned int dma_sglen;
u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
struct scatterlist bounce_sg;
#endif
@@ -220,44 +219,48 @@ static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
-static void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags)
+static void tmio_mmc_kunmap_atomic(struct scatterlist *sg, unsigned long *flags, void *virt)
{
- kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+ kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
local_irq_restore(*flags);
}
#ifdef CONFIG_MMC_DEBUG
-#define STATUS_TO_TEXT(a) \
+#define STATUS_TO_TEXT(a, status, i) \
do { \
- if (status & TMIO_STAT_##a) \
+ if (status & TMIO_STAT_##a) { \
+ if (i++) \
+ printk(" | "); \
printk(#a); \
+ } \
} while (0)
void pr_debug_status(u32 status)
{
+ int i = 0;
printk(KERN_DEBUG "status: %08x = ", status);
- STATUS_TO_TEXT(CARD_REMOVE);
- STATUS_TO_TEXT(CARD_INSERT);
- STATUS_TO_TEXT(SIGSTATE);
- STATUS_TO_TEXT(WRPROTECT);
- STATUS_TO_TEXT(CARD_REMOVE_A);
- STATUS_TO_TEXT(CARD_INSERT_A);
- STATUS_TO_TEXT(SIGSTATE_A);
- STATUS_TO_TEXT(CMD_IDX_ERR);
- STATUS_TO_TEXT(STOPBIT_ERR);
- STATUS_TO_TEXT(ILL_FUNC);
- STATUS_TO_TEXT(CMD_BUSY);
- STATUS_TO_TEXT(CMDRESPEND);
- STATUS_TO_TEXT(DATAEND);
- STATUS_TO_TEXT(CRCFAIL);
- STATUS_TO_TEXT(DATATIMEOUT);
- STATUS_TO_TEXT(CMDTIMEOUT);
- STATUS_TO_TEXT(RXOVERFLOW);
- STATUS_TO_TEXT(TXUNDERRUN);
- STATUS_TO_TEXT(RXRDY);
- STATUS_TO_TEXT(TXRQ);
- STATUS_TO_TEXT(ILL_ACCESS);
+ STATUS_TO_TEXT(CARD_REMOVE, status, i);
+ STATUS_TO_TEXT(CARD_INSERT, status, i);
+ STATUS_TO_TEXT(SIGSTATE, status, i);
+ STATUS_TO_TEXT(WRPROTECT, status, i);
+ STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
+ STATUS_TO_TEXT(CARD_INSERT_A, status, i);
+ STATUS_TO_TEXT(SIGSTATE_A, status, i);
+ STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
+ STATUS_TO_TEXT(STOPBIT_ERR, status, i);
+ STATUS_TO_TEXT(ILL_FUNC, status, i);
+ STATUS_TO_TEXT(CMD_BUSY, status, i);
+ STATUS_TO_TEXT(CMDRESPEND, status, i);
+ STATUS_TO_TEXT(DATAEND, status, i);
+ STATUS_TO_TEXT(CRCFAIL, status, i);
+ STATUS_TO_TEXT(DATATIMEOUT, status, i);
+ STATUS_TO_TEXT(CMDTIMEOUT, status, i);
+ STATUS_TO_TEXT(RXOVERFLOW, status, i);
+ STATUS_TO_TEXT(TXUNDERRUN, status, i);
+ STATUS_TO_TEXT(RXRDY, status, i);
+ STATUS_TO_TEXT(TXRQ, status, i);
+ STATUS_TO_TEXT(ILL_ACCESS, status, i);
printk("\n");
}
@@ -300,8 +303,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
/*
* Testing on sh-mobile showed that SDIO IRQs are unmasked when
@@ -324,8 +326,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
@@ -507,7 +508,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
host->sg_off += count;
- tmio_mmc_kunmap_atomic(sg_virt, &flags);
+ tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
if (host->sg_off == host->sg_ptr->length)
tmio_mmc_next_sg(host);
@@ -666,8 +667,7 @@ out:
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
unsigned int ireg, irq_mask, status;
unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
@@ -767,7 +767,7 @@ static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
unsigned long flags;
void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
- tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+ tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
}
}
@@ -796,8 +796,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
@@ -825,23 +824,16 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
sg = host->sg_ptr;
}
- ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
- if (ret > 0) {
- host->dma_sglen = ret;
+ ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
+ if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- }
if (desc) {
desc->callback = tmio_dma_complete;
desc->callback_param = host;
- cookie = desc->tx_submit(desc);
- if (cookie < 0) {
- desc = NULL;
- ret = cookie;
- } else {
- chan->device->device_issue_pending(chan);
- }
+ cookie = dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
}
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
__func__, host->sg_len, ret, cookie, host->mrq);
@@ -873,8 +865,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
@@ -901,26 +892,20 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
- tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+ tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
host->sg_ptr = &host->bounce_sg;
sg = host->sg_ptr;
}
- ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
- if (ret > 0) {
- host->dma_sglen = ret;
+ ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
+ if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- }
if (desc) {
desc->callback = tmio_dma_complete;
desc->callback_param = host;
- cookie = desc->tx_submit(desc);
- if (cookie < 0) {
- desc = NULL;
- ret = cookie;
- }
+ cookie = dmaengine_submit(desc);
}
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
__func__, host->sg_len, ret, cookie, host->mrq);
@@ -964,7 +949,7 @@ static void tmio_issue_tasklet_fn(unsigned long priv)
struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
struct dma_chan *chan = host->chan_tx;
- chan->device->device_issue_pending(chan);
+ dma_async_issue_pending(chan);
}
static void tmio_tasklet_fn(unsigned long arg)
@@ -978,10 +963,12 @@ static void tmio_tasklet_fn(unsigned long arg)
goto out;
if (host->data->flags & MMC_DATA_READ)
- dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+ dma_unmap_sg(host->chan_rx->device->dev,
+ host->sg_ptr, host->sg_len,
DMA_FROM_DEVICE);
else
- dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+ dma_unmap_sg(host->chan_tx->device->dev,
+ host->sg_ptr, host->sg_len,
DMA_TO_DEVICE);
tmio_mmc_do_data_irq(host);
@@ -1071,8 +1058,7 @@ static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
struct mmc_data *data)
{
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
data->blksz, data->blocks);
@@ -1177,8 +1163,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
@@ -1187,8 +1172,7 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
- struct mfd_cell *cell = host->pdev->dev.platform_data;
- struct tmio_mmc_data *pdata = cell->driver_data;
+ struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
if (!pdata->get_cd)
return -ENOSYS;
@@ -1207,7 +1191,7 @@ static const struct mmc_host_ops tmio_mmc_ops = {
#ifdef CONFIG_PM
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct mmc_host *mmc = platform_get_drvdata(dev);
int ret;
@@ -1222,7 +1206,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_mmc_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct mmc_host *mmc = platform_get_drvdata(dev);
int ret = 0;
@@ -1245,7 +1229,7 @@ out:
static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct tmio_mmc_data *pdata;
struct resource *res_ctl;
struct tmio_mmc_host *host;
@@ -1260,7 +1244,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
if (!res_ctl)
goto out;
- pdata = cell->driver_data;
+ pdata = mfd_get_data(dev);
if (!pdata || !pdata->hclk)
goto out;
@@ -1360,7 +1344,7 @@ out:
static int __devexit tmio_mmc_remove(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct mmc_host *mmc = platform_get_drvdata(dev);
platform_set_drvdata(dev, NULL);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 9ed84ddb478..8c5b4881ccd 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -802,12 +802,9 @@ static const struct mmc_host_ops via_sdc_ops = {
static void via_reset_pcictrl(struct via_crdr_mmc_host *host)
{
- void __iomem *addrbase;
unsigned long flags;
u8 gatt;
- addrbase = host->pcictrl_mmiobase;
-
spin_lock_irqsave(&host->lock, flags);
via_save_pcictrlreg(host);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index c89592239bc..4f6c06f1632 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -106,23 +106,6 @@ config MTD_NAND_OMAP2
help
Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
-config MTD_NAND_OMAP_PREFETCH
- bool "GPMC prefetch support for NAND Flash device"
- depends on MTD_NAND_OMAP2
- default y
- help
- The NAND device can be accessed for Read/Write using GPMC PREFETCH engine
- to improve the performance.
-
-config MTD_NAND_OMAP_PREFETCH_DMA
- depends on MTD_NAND_OMAP_PREFETCH
- bool "DMA mode"
- default n
- help
- The GPMC PREFETCH engine can be configured eigther in MPU interrupt mode
- or in DMA interrupt mode.
- Say y for DMA mode or MPU mode will be used
-
config MTD_NAND_IDS
tristate
@@ -476,7 +459,7 @@ config MTD_NAND_MPC5121_NFC
config MTD_NAND_MXC
tristate "MXC NAND support"
- depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 || ARCH_MX51
+ depends on IMX_HAVE_PLATFORM_MXC_NAND
help
This enables the driver for the NAND flash controller on the
MXC processors.
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 28af71c6183..7b8f1fffc52 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
@@ -24,6 +25,7 @@
#include <plat/nand.h>
#define DRIVER_NAME "omap2-nand"
+#define OMAP_NAND_TIMEOUT_MS 5000
#define NAND_Ecc_P1e (1 << 0)
#define NAND_Ecc_P2e (1 << 1)
@@ -96,26 +98,19 @@
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
-static int use_prefetch = 1;
-
-/* "modprobe ... use_prefetch=0" etc */
-module_param(use_prefetch, bool, 0);
-MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");
-
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
-static int use_dma = 1;
+/* oob info generated runtime depending on ecc algorithm and layout selected */
+static struct nand_ecclayout omap_oobinfo;
+/* Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks
+ */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr bb_descrip_flashbased = {
+ .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern,
+};
-/* "modprobe ... use_dma=0" etc */
-module_param(use_dma, bool, 0);
-MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
-#else
-static const int use_dma;
-#endif
-#else
-const int use_prefetch;
-static const int use_dma;
-#endif
struct omap_nand_info {
struct nand_hw_control controller;
@@ -129,6 +124,13 @@ struct omap_nand_info {
unsigned long phys_base;
struct completion comp;
int dma_ch;
+ int gpmc_irq;
+ enum {
+ OMAP_NAND_IO_READ = 0, /* read */
+ OMAP_NAND_IO_WRITE, /* write */
+ } iomode;
+ u_char *buf;
+ int buf_len;
};
/**
@@ -256,7 +258,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
@@ -288,9 +291,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
- uint32_t pref_count = 0, w_count = 0;
+ uint32_t w_count = 0;
int i = 0, ret = 0;
u16 *p;
+ unsigned long tim, limit;
/* take care of subpage writes */
if (len % 2 != 0) {
@@ -300,7 +304,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
@@ -316,15 +321,17 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
iowrite16(*p++, info->nand.IO_ADDR_W);
}
/* wait for data to flushed-out before reset the prefetch */
- do {
- pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
- } while (pref_count);
+ tim = 0;
+ limit = (loops_per_jiffy *
+ msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
+
/* disable and stop the PFPW engine */
gpmc_prefetch_reset(info->gpmc_cs);
}
}
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
/*
* omap_nand_dma_cb: callback on the completion of dma transfer
* @lch: logical channel
@@ -348,14 +355,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
- uint32_t prefetch_status = 0;
enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
DMA_FROM_DEVICE;
dma_addr_t dma_addr;
int ret;
+ unsigned long tim, limit;
- /* The fifo depth is 64 bytes. We have a sync at each frame and frame
- * length is 64 bytes.
+ /* The fifo depth is 64 bytes max.
+ * But configure the FIFO threshold to 32 to get a sync at each frame
+ * and a frame length of 32 bytes.
*/
int buf_len = len >> 6;
@@ -396,9 +404,10 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
if (ret)
- /* PFPW engine is busy, use cpu copy methode */
+ /* PFPW engine is busy, use cpu copy method */
goto out_copy;
init_completion(&info->comp);
@@ -407,10 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
/* setup and start DMA using dma_addr */
wait_for_completion(&info->comp);
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
- do {
- prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
- } while (prefetch_status);
/* disable and stop the PFPW engine */
gpmc_prefetch_reset(info->gpmc_cs);
@@ -426,14 +436,6 @@ out_copy:
: omap_write_buf8(mtd, (u_char *) addr, len);
return 0;
}
-#else
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
-static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
- unsigned int len, int is_write)
-{
- return 0;
-}
-#endif
/**
* omap_read_buf_dma_pref - read data from NAND controller into buffer
@@ -466,6 +468,157 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}
+/*
+ * omap_nand_irq - GPMC irq handler
+ * @this_irq: gpmc irq number
+ * @dev: omap_nand_info structure pointer is passed here
+ */
+static irqreturn_t omap_nand_irq(int this_irq, void *dev)
+{
+ struct omap_nand_info *info = (struct omap_nand_info *) dev;
+ u32 bytes;
+ u32 irq_stat;
+
+ irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+ bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ bytes = bytes & 0xFFFC; /* io in multiples of 4 bytes */
+ if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
+ if (irq_stat & 0x2)
+ goto done;
+
+ if (info->buf_len && (info->buf_len < bytes))
+ bytes = info->buf_len;
+ else if (!info->buf_len)
+ bytes = 0;
+ iowrite32_rep(info->nand.IO_ADDR_W,
+ (u32 *)info->buf, bytes >> 2);
+ info->buf = info->buf + bytes;
+ info->buf_len -= bytes;
+
+ } else {
+ ioread32_rep(info->nand.IO_ADDR_R,
+ (u32 *)info->buf, bytes >> 2);
+ info->buf = info->buf + bytes;
+
+ if (irq_stat & 0x2)
+ goto done;
+ }
+ gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+ return IRQ_HANDLED;
+
+done:
+ complete(&info->comp);
+ /* disable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
+
+ /* clear status */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * omap_read_buf_irq_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ int ret = 0;
+
+ if (len <= mtd->oobsize) {
+ omap_read_buf_pref(mtd, buf, len);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_READ;
+ info->buf = buf;
+ init_completion(&info->comp);
+
+ /* configure and start prefetch transfer */
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ info->buf_len = len;
+ /* enable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+ (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ /* waiting for read to complete */
+ wait_for_completion(&info->comp);
+
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset(info->gpmc_cs);
+ return;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len);
+ else
+ omap_read_buf8(mtd, buf, len);
+}
+
+/*
+ * omap_write_buf_irq_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_irq_pref(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ int ret = 0;
+ unsigned long tim, limit;
+
+ if (len <= mtd->oobsize) {
+ omap_write_buf_pref(mtd, buf, len);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_WRITE;
+ info->buf = (u_char *) buf;
+ init_completion(&info->comp);
+
+ /* configure and start prefetch transfer: fifo threshold = 24 */
+ ret = gpmc_prefetch_enable(info->gpmc_cs,
+ (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ info->buf_len = len;
+ /* enable irq */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+ (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ /* waiting for write to complete */
+ wait_for_completion(&info->comp);
+ /* wait for data to be flushed out before resetting the prefetch */
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ cpu_relax();
+
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset(info->gpmc_cs);
+ return;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_write_buf16(mtd, buf, len);
+ else
+ omap_write_buf8(mtd, buf, len);
+}
+
/**
* omap_verify_buf - Verify chip data against buffer
* @mtd: MTD device structure
@@ -487,8 +640,6 @@ static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
return 0;
}
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
-
/**
* gen_true_ecc - This function will generate true ECC value
* @ecc_buf: buffer to store ecc code
@@ -708,8 +859,6 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
}
-#endif
-
/**
* omap_wait - wait until the command is done
* @mtd: MTD device structure
@@ -779,6 +928,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
struct omap_nand_info *info;
struct omap_nand_platform_data *pdata;
int err;
+ int i, offset;
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -804,7 +954,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->mtd.name = dev_name(&pdev->dev);
info->mtd.owner = THIS_MODULE;
- info->nand.options |= pdata->devsize ? NAND_BUSWIDTH_16 : 0;
+ info->nand.options = pdata->devsize;
info->nand.options |= NAND_SKIP_BBTSCAN;
/* NAND write protect off */
@@ -842,28 +992,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.chip_delay = 50;
}
- if (use_prefetch) {
-
+ switch (pdata->xfer_type) {
+ case NAND_OMAP_PREFETCH_POLLED:
info->nand.read_buf = omap_read_buf_pref;
info->nand.write_buf = omap_write_buf_pref;
- if (use_dma) {
- err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
- omap_nand_dma_cb, &info->comp, &info->dma_ch);
- if (err < 0) {
- info->dma_ch = -1;
- printk(KERN_WARNING "DMA request failed."
- " Non-dma data transfer mode\n");
- } else {
- omap_set_dma_dest_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
- omap_set_dma_src_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
-
- info->nand.read_buf = omap_read_buf_dma_pref;
- info->nand.write_buf = omap_write_buf_dma_pref;
- }
- }
- } else {
+ break;
+
+ case NAND_OMAP_POLLED:
if (info->nand.options & NAND_BUSWIDTH_16) {
info->nand.read_buf = omap_read_buf16;
info->nand.write_buf = omap_write_buf16;
@@ -871,20 +1006,61 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.read_buf = omap_read_buf8;
info->nand.write_buf = omap_write_buf8;
}
+ break;
+
+ case NAND_OMAP_PREFETCH_DMA:
+ err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
+ omap_nand_dma_cb, &info->comp, &info->dma_ch);
+ if (err < 0) {
+ info->dma_ch = -1;
+ dev_err(&pdev->dev, "DMA request failed!\n");
+ goto out_release_mem_region;
+ } else {
+ omap_set_dma_dest_burst_mode(info->dma_ch,
+ OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_src_burst_mode(info->dma_ch,
+ OMAP_DMA_DATA_BURST_16);
+
+ info->nand.read_buf = omap_read_buf_dma_pref;
+ info->nand.write_buf = omap_write_buf_dma_pref;
+ }
+ break;
+
+ case NAND_OMAP_PREFETCH_IRQ:
+ err = request_irq(pdata->gpmc_irq,
+ omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+ if (err) {
+ dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+ pdata->gpmc_irq, err);
+ goto out_release_mem_region;
+ } else {
+ info->gpmc_irq = pdata->gpmc_irq;
+ info->nand.read_buf = omap_read_buf_irq_pref;
+ info->nand.write_buf = omap_write_buf_irq_pref;
+ }
+ break;
+
+ default:
+ dev_err(&pdev->dev,
+ "xfer_type(%d) not supported!\n", pdata->xfer_type);
+ err = -EINVAL;
+ goto out_release_mem_region;
}
- info->nand.verify_buf = omap_verify_buf;
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
- info->nand.ecc.bytes = 3;
- info->nand.ecc.size = 512;
- info->nand.ecc.calculate = omap_calculate_ecc;
- info->nand.ecc.hwctl = omap_enable_hwecc;
- info->nand.ecc.correct = omap_correct_data;
- info->nand.ecc.mode = NAND_ECC_HW;
+ info->nand.verify_buf = omap_verify_buf;
-#else
- info->nand.ecc.mode = NAND_ECC_SOFT;
-#endif
+ /* select the ecc type */
+ if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
+ info->nand.ecc.mode = NAND_ECC_SOFT;
+ else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
+ (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
+ info->nand.ecc.bytes = 3;
+ info->nand.ecc.size = 512;
+ info->nand.ecc.calculate = omap_calculate_ecc;
+ info->nand.ecc.hwctl = omap_enable_hwecc;
+ info->nand.ecc.correct = omap_correct_data;
+ info->nand.ecc.mode = NAND_ECC_HW;
+ }
/* DIP switches on some boards change between 8 and 16 bit
* bus widths for flash. Try the other width if the first try fails.
@@ -897,6 +1073,26 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
}
}
+ /* rom code layout */
+ if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
+
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ offset = 2;
+ else {
+ offset = 1;
+ info->nand.badblock_pattern = &bb_descrip_flashbased;
+ }
+ omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
+ for (i = 0; i < omap_oobinfo.eccbytes; i++)
+ omap_oobinfo.eccpos[i] = i+offset;
+
+ omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
+ omap_oobinfo.oobfree->length = info->mtd.oobsize -
+ (offset + omap_oobinfo.eccbytes);
+
+ info->nand.ecc.layout = &omap_oobinfo;
+ }
+
#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
if (err > 0)
@@ -926,9 +1122,12 @@ static int omap_nand_remove(struct platform_device *pdev)
mtd);
platform_set_drvdata(pdev, NULL);
- if (use_dma)
+ if (info->dma_ch != -1)
omap_free_dma(info->dma_ch);
+ if (info->gpmc_irq)
+ free_irq(info->gpmc_irq, info);
+
/* Release NAND device, its internal structures and partitions */
nand_release(&info->mtd);
iounmap(info->nand.IO_ADDR_R);
@@ -947,16 +1146,8 @@ static struct platform_driver omap_nand_driver = {
static int __init omap_nand_init(void)
{
- printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
+ pr_info("%s driver initializing\n", DRIVER_NAME);
- /* This check is required if driver is being
- * loaded run time as a module
- */
- if ((1 == use_dma) && (0 == use_prefetch)) {
- printk(KERN_INFO"Wrong parameters: 'use_dma' can not be 1 "
- "without use_prefetch'. Prefetch will not be"
- " used in either mode (mpu or dma)\n");
- }
return platform_driver_register(&omap_nand_driver);
}
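
The bounded polling loops introduced above replace waits that could spin forever if the prefetch engine wedged. Extracted as an illustrative fragment (is_engine_busy() is a stand-in for the gpmc_read_status(GPMC_PREFETCH_COUNT) check; everything else appears in the patch):

        /* Rough iteration bound: loops_per_jiffy calibrated spins per
         * jiffy times the number of jiffies in the timeout.  Each real
         * iteration also does a register read, so the wall-clock wait
         * is at least OMAP_NAND_TIMEOUT_MS -- fine for a failure guard. */
        unsigned long tim = 0;
        unsigned long limit = loops_per_jiffy *
                              msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS);

        while (is_engine_busy() && (tim++ < limit))
                cpu_relax();    /* hint that we are busy-waiting */
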
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 3041d1f7ae3..38fb16771f8 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -319,7 +319,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
int ret;
if (cell->enable) {
@@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
if (cell->disable)
@@ -372,8 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
static int tmio_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
- struct tmio_nand_data *data = cell->driver_data;
+ struct tmio_nand_data *data = mfd_get_data(dev);
struct resource *fcr = platform_get_resource(dev,
IORESOURCE_MEM, 0);
struct resource *ccr = platform_get_resource(dev,
@@ -516,7 +515,7 @@ static int tmio_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int tmio_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
if (cell->suspend)
cell->suspend(dev);
@@ -527,7 +526,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+ const struct mfd_cell *cell = mfd_get_cell(dev);
/* FIXME - is this required or merely another attack of the broken
* SHARP platform? Looks suspicious.
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 4dbd0f58eeb..4f426195f8d 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -32,7 +32,7 @@ config MTD_ONENAND_OMAP2
config MTD_ONENAND_SAMSUNG
tristate "OneNAND on Samsung SOC controller support"
- depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5PV310
+ depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4
help
Support for a OneNAND flash device connected to an Samsung SOC.
S3C64XX/S5PC100 use command mapping method.
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index c849cacf4b2..14a49abe057 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -63,7 +63,7 @@ struct omap2_onenand {
struct completion dma_done;
int dma_channel;
int freq;
- int (*setup)(void __iomem *base, int freq);
+ int (*setup)(void __iomem *base, int *freq_ptr);
struct regulator *regulator;
};
@@ -148,11 +148,9 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
wait_err("controller error", state, ctrl, intr);
return -EIO;
}
- if ((intr & intr_flags) != intr_flags) {
- wait_err("timeout", state, ctrl, intr);
- return -EIO;
- }
- return 0;
+ if ((intr & intr_flags) == intr_flags)
+ return 0;
+ /* Continue in wait for interrupt branch */
}
if (state != FL_READING) {
@@ -581,7 +579,7 @@ static int __adjust_timing(struct device *dev, void *data)
/* DMA is not in use so this is all that is needed */
/* Revisit for OMAP3! */
- ret = c->setup(c->onenand.base, c->freq);
+ ret = c->setup(c->onenand.base, &c->freq);
return ret;
}
@@ -673,7 +671,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
}
if (pdata->onenand_setup != NULL) {
- r = pdata->onenand_setup(c->onenand.base, c->freq);
+ r = pdata->onenand_setup(c->onenand.base, &c->freq);
if (r < 0) {
dev_err(&pdev->dev, "Onenand platform setup failed: "
"%d\n", r);
@@ -718,8 +716,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
- "base %p\n", c->gpmc_cs, c->phys_base,
- c->onenand.base);
+ "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
+ c->onenand.base, c->freq);
c->pdev = pdev;
c->mtd.name = dev_name(&pdev->dev);
@@ -754,24 +752,6 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
- switch ((c->onenand.version_id >> 4) & 0xf) {
- case 0:
- c->freq = 40;
- break;
- case 1:
- c->freq = 54;
- break;
- case 2:
- c->freq = 66;
- break;
- case 3:
- c->freq = 83;
- break;
- case 4:
- c->freq = 104;
- break;
- }
-
#ifdef CONFIG_MTD_PARTITIONS
r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
if (r > 0)
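
With onenand_setup now taking int *freq_ptr, the board hook can report back the interface frequency it actually programmed, which replaces the removed version-ID switch and feeds the new dev_info() line. A hypothetical board-side hook under the new signature (board_onenand_setup and the 83 MHz value are illustrative only):

static int board_onenand_setup(void __iomem *base, int *freq_ptr)
{
        int freq = 83;                  /* MHz, as chosen by the board code */

        /* ... program GPMC timings for 'freq' here ... */

        if (freq_ptr)
                *freq_ptr = freq;       /* hand the negotiated value back */
        return 0;
}
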
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 03823327db2..9928115496e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1944,7 +1944,7 @@ config 68360_ENET
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
- MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
+ IMX_HAVE_PLATFORM_FEC || SOC_IMX28
select PHYLIB
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 366f5cc050a..102b16c6cc9 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/netdevice.h>
#include <linux/can.h>
@@ -1643,7 +1644,7 @@ static int __devinit ican3_probe(struct platform_device *pdev)
struct device *dev;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = mfd_get_data(pdev);
if (!pdata)
return -ENXIO;
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index b79d7e1555d..db0290f05bd 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -1163,15 +1163,11 @@ static int ethoc_resume(struct platform_device *pdev)
# define ethoc_resume NULL
#endif
-#ifdef CONFIG_OF
static struct of_device_id ethoc_match[] = {
- {
- .compatible = "opencores,ethoc",
- },
+ { .compatible = "opencores,ethoc", },
{},
};
MODULE_DEVICE_TABLE(of, ethoc_match);
-#endif
static struct platform_driver ethoc_driver = {
.probe = ethoc_probe,
@@ -1181,9 +1177,7 @@ static struct platform_driver ethoc_driver = {
.driver = {
.name = "ethoc",
.owner = THIS_MODULE,
-#ifdef CONFIG_OF
.of_match_table = ethoc_match,
-#endif
},
};
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 928b2b83cef..efd44afeae8 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -1145,7 +1146,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
struct resource *iomem;
struct net_device *netdev;
struct ks8842_adapter *adapter;
- struct ks8842_platform_data *pdata = pdev->dev.platform_data;
+ struct ks8842_platform_data *pdata = mfd_get_data(pdev);
u16 id;
unsigned i;
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 5f3b97668e6..8f128541656 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -340,7 +340,7 @@ next:
static const struct driver_info eem_info = {
.description = "CDC EEM Device",
- .flags = FLAG_ETHER,
+ .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
.bind = eem_bind,
.rx_fixup = eem_rx_fixup,
.tx_fixup = eem_tx_fixup,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 1189d726419..f82284effd9 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -450,7 +450,7 @@ static int cdc_manage_power(struct usbnet *dev, int on)
static const struct driver_info cdc_info = {
.description = "CDC Ethernet Device",
- .flags = FLAG_ETHER,
+ .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
// .check_connect = cdc_check_connect,
.bind = cdc_bind,
.unbind = usbnet_cdc_unbind,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 7113168473c..967371f0445 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1237,7 +1237,7 @@ static int cdc_ncm_manage_power(struct usbnet *dev, int status)
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
- .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET,
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.check_connect = cdc_ncm_check_connect,
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index ca39ace0b0e..fc5f13d47ad 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -89,6 +89,7 @@ static int always_connected (struct usbnet *dev)
static const struct driver_info ali_m5632_info = {
.description = "ALi M5632",
+ .flags = FLAG_POINTTOPOINT,
};
#endif
@@ -110,6 +111,7 @@ static const struct driver_info ali_m5632_info = {
static const struct driver_info an2720_info = {
.description = "AnchorChips/Cypress 2720",
+ .flags = FLAG_POINTTOPOINT,
// no reset available!
// no check_connect available!
@@ -132,6 +134,7 @@ static const struct driver_info an2720_info = {
static const struct driver_info belkin_info = {
.description = "Belkin, eTEK, or compatible",
+ .flags = FLAG_POINTTOPOINT,
};
#endif /* CONFIG_USB_BELKIN */
@@ -157,6 +160,7 @@ static const struct driver_info belkin_info = {
static const struct driver_info epson2888_info = {
.description = "Epson USB Device",
.check_connect = always_connected,
+ .flags = FLAG_POINTTOPOINT,
.in = 4, .out = 3,
};
@@ -173,6 +177,7 @@ static const struct driver_info epson2888_info = {
#define HAVE_HARDWARE
static const struct driver_info kc2190_info = {
.description = "KC Technology KC-190",
+ .flags = FLAG_POINTTOPOINT,
};
#endif /* CONFIG_USB_KC2190 */
@@ -200,16 +205,19 @@ static const struct driver_info kc2190_info = {
static const struct driver_info linuxdev_info = {
.description = "Linux Device",
.check_connect = always_connected,
+ .flags = FLAG_POINTTOPOINT,
};
static const struct driver_info yopy_info = {
.description = "Yopy",
.check_connect = always_connected,
+ .flags = FLAG_POINTTOPOINT,
};
static const struct driver_info blob_info = {
.description = "Boot Loader OBject",
.check_connect = always_connected,
+ .flags = FLAG_POINTTOPOINT,
};
#endif /* CONFIG_USB_ARMLINUX */
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index dcd57c37ef7..c4cfd1dea88 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -193,7 +193,7 @@ static int genelink_bind(struct usbnet *dev, struct usb_interface *intf)
static const struct driver_info genelink_info = {
.description = "Genesys GeneLink",
- .flags = FLAG_FRAMING_GL | FLAG_NO_SETINT,
+ .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_GL | FLAG_NO_SETINT,
.bind = genelink_bind,
.rx_fixup = genelink_rx_fixup,
.tx_fixup = genelink_tx_fixup,
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index ba72a7281cb..01db4602a39 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -560,7 +560,7 @@ static int net1080_bind(struct usbnet *dev, struct usb_interface *intf)
static const struct driver_info net1080_info = {
.description = "NetChip TurboCONNECT",
- .flags = FLAG_FRAMING_NC,
+ .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_NC,
.bind = net1080_bind,
.reset = net1080_reset,
.check_connect = net1080_check_connect,
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 08ad269f6b4..823c5375130 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -96,7 +96,7 @@ static int pl_reset(struct usbnet *dev)
static const struct driver_info prolific_info = {
.description = "Prolific PL-2301/PL-2302",
- .flags = FLAG_NO_SETINT,
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT,
/* some PL-2302 versions seem to fail usb_set_interface() */
.reset = pl_reset,
};
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index dd8a4adf48c..5994a25c56a 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(rndis_tx_fixup);
static const struct driver_info rndis_info = {
.description = "RNDIS device",
- .flags = FLAG_ETHER | FLAG_FRAMING_RN | FLAG_NO_SETINT,
+ .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
.bind = rndis_bind,
.unbind = rndis_unbind,
.status = rndis_status,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index bc86f4b6ecc..727874d9deb 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -49,6 +49,8 @@
struct smsc95xx_priv {
u32 mac_cr;
+ u32 hash_hi;
+ u32 hash_lo;
spinlock_t mac_cr_lock;
bool use_tx_csum;
bool use_rx_csum;
@@ -370,10 +372,11 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
{
struct usbnet *dev = netdev_priv(netdev);
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- u32 hash_hi = 0;
- u32 hash_lo = 0;
unsigned long flags;
+ pdata->hash_hi = 0;
+ pdata->hash_lo = 0;
+
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
if (dev->net->flags & IFF_PROMISC) {
@@ -394,13 +397,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
u32 bitnum = smsc95xx_hash(ha->addr);
u32 mask = 0x01 << (bitnum & 0x1F);
if (bitnum & 0x20)
- hash_hi |= mask;
+ pdata->hash_hi |= mask;
else
- hash_lo |= mask;
+ pdata->hash_lo |= mask;
}
netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n",
- hash_hi, hash_lo);
+ pdata->hash_hi, pdata->hash_lo);
} else {
netif_dbg(dev, drv, dev->net, "receive own packets only\n");
pdata->mac_cr &=
@@ -410,8 +413,8 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
/* Initiate async writes, as we can't wait for completion here */
- smsc95xx_write_reg_async(dev, HASHH, &hash_hi);
- smsc95xx_write_reg_async(dev, HASHL, &hash_lo);
+ smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
+ smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
}
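
The smsc95xx hunk is a lifetime fix rather than a feature: smsc95xx_write_reg_async() only queues the write, so the buffer it is given must stay valid after smsc95xx_set_multicast() returns. Moving hash_hi/hash_lo into struct smsc95xx_priv gives them per-device storage. Schematically (a contrived before/after, not the driver's actual code):

/* before: the async write may dereference &val after this function has
 * returned and the stack slot has been reused */
static void hash_write_broken(struct usbnet *dev, u32 val)
{
        smsc95xx_write_reg_async(dev, HASHH, &val);
}

/* after: the value lives in long-lived, per-device state */
static void hash_write_fixed(struct usbnet *dev)
{
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);

        pdata->hash_hi = 0;
        smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
}
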
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 95c41d56631..c5b6cfbfb9a 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1376,7 +1376,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
// else "eth%d" when there's reasonable doubt. userspace
// can rename the link if it knows better.
if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
- (net->dev_addr [0] & 0x02) == 0)
+ ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
+ (net->dev_addr [0] & 0x02) == 0))
strcpy (net->name, "eth%d");
/* WLAN devices should always be named "wlan%d" */
if ((dev->driver_info->flags & FLAG_WLAN) != 0)
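
Taken together with the FLAG_POINTTOPOINT additions in the surrounding drivers, the naming rule in usbnet_probe() reduces to the predicate below (a restatement for clarity, not additional driver code):

        /* "eth%d" only for ethernet-like devices that are either not
         * point-to-point links or carry a globally administered MAC
         * (locally administered addresses have bit 0x02 set in byte 0);
         * point-to-point links with made-up MACs keep "usb%d". */
        bool use_eth_name =
                (dev->driver_info->flags & FLAG_ETHER) &&
                (!(dev->driver_info->flags & FLAG_POINTTOPOINT) ||
                 !(net->dev_addr[0] & 0x02));
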
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 3eb0b167b5b..241756e0e86 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -102,7 +102,7 @@ static int always_connected (struct usbnet *dev)
static const struct driver_info zaurus_sl5x00_info = {
.description = "Sharp Zaurus SL-5x00",
- .flags = FLAG_FRAMING_Z,
+ .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_Z,
.check_connect = always_connected,
.bind = zaurus_bind,
.unbind = usbnet_cdc_unbind,
@@ -112,7 +112,7 @@ static const struct driver_info zaurus_sl5x00_info = {
static const struct driver_info zaurus_pxa_info = {
.description = "Sharp Zaurus, PXA-2xx based",
- .flags = FLAG_FRAMING_Z,
+ .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_Z,
.check_connect = always_connected,
.bind = zaurus_bind,
.unbind = usbnet_cdc_unbind,
@@ -122,7 +122,7 @@ static const struct driver_info zaurus_pxa_info = {
static const struct driver_info olympus_mxl_info = {
.description = "Olympus R1000",
- .flags = FLAG_FRAMING_Z,
+ .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_Z,
.check_connect = always_connected,
.bind = zaurus_bind,
.unbind = usbnet_cdc_unbind,
@@ -258,7 +258,7 @@ bad_desc:
static const struct driver_info bogus_mdlm_info = {
.description = "pseudo-MDLM (BLAN) device",
- .flags = FLAG_FRAMING_Z,
+ .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_Z,
.check_connect = always_connected,
.tx_fixup = zaurus_tx_fixup,
.bind = blan_mdlm_bind,
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index a3ea27ce04f..33e920aa3a7 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -104,7 +104,7 @@ static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
}
-static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
+static inline u16 hd_desc_offset(port_t *port, u16 desc, int transmit)
{
/* Descriptor offset always fits in 16 bits */
return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
@@ -116,10 +116,10 @@ static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
{
#ifdef PAGE0_ALWAYS_MAPPED
return (pkt_desc __iomem *)(win0base(port_to_card(port))
- + desc_offset(port, desc, transmit));
+ + hd_desc_offset(port, desc, transmit));
#else
return (pkt_desc __iomem *)(winbase(port_to_card(port))
- + desc_offset(port, desc, transmit));
+ + hd_desc_offset(port, desc, transmit));
#endif
}
@@ -169,7 +169,7 @@ static void sca_init_port(port_t *port)
for (i = 0; i < buffs; i++) {
pkt_desc __iomem *desc = desc_address(port, i, transmit);
- u16 chain_off = desc_offset(port, i + 1, transmit);
+ u16 chain_off = hd_desc_offset(port, i + 1, transmit);
u32 buff_off = buffer_offset(port, i, transmit);
writew(chain_off, &desc->cp);
@@ -187,12 +187,12 @@ static void sca_init_port(port_t *port)
/* current desc addr */
sca_out(0, dmac + CPB, card); /* pointer base */
- sca_outw(desc_offset(port, 0, transmit), dmac + CDAL, card);
+ sca_outw(hd_desc_offset(port, 0, transmit), dmac + CDAL, card);
if (!transmit)
- sca_outw(desc_offset(port, buffs - 1, transmit),
+ sca_outw(hd_desc_offset(port, buffs - 1, transmit),
dmac + EDAL, card);
else
- sca_outw(desc_offset(port, 0, transmit), dmac + EDAL,
+ sca_outw(hd_desc_offset(port, 0, transmit), dmac + EDAL,
card);
/* clear frame end interrupt counter */
@@ -305,7 +305,7 @@ static inline void sca_rx_intr(port_t *port)
dev->stats.rx_over_errors++;
while (1) {
- u32 desc_off = desc_offset(port, port->rxin, 0);
+ u32 desc_off = hd_desc_offset(port, port->rxin, 0);
pkt_desc __iomem *desc;
u32 cda = sca_inw(dmac + CDAL, card);
@@ -359,7 +359,7 @@ static inline void sca_tx_intr(port_t *port)
while (1) {
pkt_desc __iomem *desc;
- u32 desc_off = desc_offset(port, port->txlast, 1);
+ u32 desc_off = hd_desc_offset(port, port->txlast, 1);
u32 cda = sca_inw(dmac + CDAL, card);
if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
break; /* Transmitter is/will_be sending this frame */
@@ -660,7 +660,7 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
writeb(ST_TX_EOM, &desc->stat);
port->txin = next_desc(port, port->txin, 1);
- sca_outw(desc_offset(port, port->txin, 1),
+ sca_outw(hd_desc_offset(port, port->txin, 1),
get_dmac_tx(port) + EDAL, card);
sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index e305274f83f..5c801ea8cc0 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -87,7 +87,7 @@ static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
}
-static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
+static inline u16 hd_desc_offset(port_t *port, u16 desc, int transmit)
{
/* Descriptor offset always fits in 16 bits */
return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
@@ -98,7 +98,7 @@ static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
int transmit)
{
return (pkt_desc __iomem *)(port->card->rambase +
- desc_offset(port, desc, transmit));
+ hd_desc_offset(port, desc, transmit));
}
@@ -143,7 +143,7 @@ static void sca_init_port(port_t *port)
for (i = 0; i < buffs; i++) {
pkt_desc __iomem *desc = desc_address(port, i, transmit);
- u16 chain_off = desc_offset(port, i + 1, transmit);
+ u16 chain_off = hd_desc_offset(port, i + 1, transmit);
u32 buff_off = buffer_offset(port, i, transmit);
writel(chain_off, &desc->cp);
@@ -162,11 +162,11 @@ static void sca_init_port(port_t *port)
sca_out(DCR_ABORT, DCR_TX(port->chan), card);
/* current desc addr */
- sca_outl(desc_offset(port, 0, 0), dmac_rx + CDAL, card);
- sca_outl(desc_offset(port, card->tx_ring_buffers - 1, 0),
+ sca_outl(hd_desc_offset(port, 0, 0), dmac_rx + CDAL, card);
+ sca_outl(hd_desc_offset(port, card->tx_ring_buffers - 1, 0),
dmac_rx + EDAL, card);
- sca_outl(desc_offset(port, 0, 1), dmac_tx + CDAL, card);
- sca_outl(desc_offset(port, 0, 1), dmac_tx + EDAL, card);
+ sca_outl(hd_desc_offset(port, 0, 1), dmac_tx + CDAL, card);
+ sca_outl(hd_desc_offset(port, 0, 1), dmac_tx + EDAL, card);
/* clear frame end interrupt counter */
sca_out(DCR_CLEAR_EOF, DCR_RX(port->chan), card);
@@ -249,7 +249,7 @@ static inline int sca_rx_done(port_t *port, int budget)
dev->stats.rx_over_errors++;
while (received < budget) {
- u32 desc_off = desc_offset(port, port->rxin, 0);
+ u32 desc_off = hd_desc_offset(port, port->rxin, 0);
pkt_desc __iomem *desc;
u32 cda = sca_inl(dmac + CDAL, card);
@@ -590,7 +590,7 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
writeb(ST_TX_EOM, &desc->stat);
port->txin = (port->txin + 1) % card->tx_ring_buffers;
- sca_outl(desc_offset(port, port->txin, 1),
+ sca_outl(hd_desc_offset(port, port->txin, 1),
get_dmac_tx(port) + EDAL, card);
sca_out(DSR_DE, DSR_TX(port->chan), card); /* Enable TX DMA */
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 0e65bce457d..35ce7b0f4a6 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -3,7 +3,7 @@ menuconfig WL12XX_MENU
depends on MAC80211 && EXPERIMENTAL
---help---
This will enable TI wl12xx driver support for the following chips:
- wl1271 and wl1273.
+ wl1271, wl1273, wl1281 and wl1283.
The drivers make use of the mac80211 stack.
config WL12XX
@@ -54,7 +54,7 @@ config WL12XX_SDIO
config WL12XX_SDIO_TEST
tristate "TI wl12xx SDIO testing support"
- depends on WL12XX && MMC
+ depends on WL12XX && MMC && WL12XX_SDIO
default n
---help---
This module adds support for the SDIO bus testing with the
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index cc4068d2b4a..b1b5139139e 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -751,10 +751,10 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
return 0;
}
-int wl1271_acx_rate_policies(struct wl1271 *wl)
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
{
- struct acx_rate_policy *acx;
- struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
+ struct acx_sta_rate_policy *acx;
+ struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
int idx = 0;
int ret = 0;
@@ -783,6 +783,10 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
+ wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
+ acx->rate_class[ACX_TX_BASIC_RATE].enabled_rates,
+ acx->rate_class[ACX_TX_AP_FULL_RATE].enabled_rates);
+
ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("Setting of rate policies failed: %d", ret);
@@ -794,6 +798,38 @@ out:
return ret;
}
+int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
+ u8 idx)
+{
+ struct acx_ap_rate_policy *acx;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx ap rate policy");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->rate_policy.enabled_rates = cpu_to_le32(c->enabled_rates);
+ acx->rate_policy.short_retry_limit = c->short_retry_limit;
+ acx->rate_policy.long_retry_limit = c->long_retry_limit;
+ acx->rate_policy.aflags = c->aflags;
+
+ acx->rate_policy_idx = cpu_to_le32(idx);
+
+ ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("Setting of ap rate policy failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifsn, u16 txop)
{
@@ -915,9 +951,10 @@ out:
return ret;
}
-int wl1271_acx_mem_cfg(struct wl1271 *wl)
+int wl1271_acx_ap_mem_cfg(struct wl1271 *wl)
{
- struct wl1271_acx_config_memory *mem_conf;
+ struct wl1271_acx_ap_config_memory *mem_conf;
+ struct conf_memory_settings *mem;
int ret;
wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
@@ -928,11 +965,21 @@ int wl1271_acx_mem_cfg(struct wl1271 *wl)
goto out;
}
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ /*
+ * FIXME: The 128x AP FW does not yet support dynamic memory.
+ * Use the base memory configuration for 128x for now. This
+ * should be fine tuned in the future.
+ */
+ mem = &wl->conf.mem_wl128x;
+ else
+ mem = &wl->conf.mem_wl127x;
+
/* memory config */
- mem_conf->num_stations = DEFAULT_NUM_STATIONS;
- mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS;
- mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS;
- mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES;
+ mem_conf->num_stations = mem->num_stations;
+ mem_conf->rx_mem_block_num = mem->rx_block_num;
+ mem_conf->tx_min_mem_block_num = mem->tx_min_block_num;
+ mem_conf->num_ssid_profiles = mem->ssid_profiles;
mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
@@ -947,13 +994,77 @@ out:
return ret;
}
-int wl1271_acx_init_mem_config(struct wl1271 *wl)
+int wl1271_acx_sta_mem_cfg(struct wl1271 *wl)
{
+ struct wl1271_acx_sta_config_memory *mem_conf;
+ struct conf_memory_settings *mem;
int ret;
- ret = wl1271_acx_mem_cfg(wl);
- if (ret < 0)
- return ret;
+ wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
+
+ mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
+ if (!mem_conf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ mem = &wl->conf.mem_wl128x;
+ else
+ mem = &wl->conf.mem_wl127x;
+
+ /* memory config */
+ mem_conf->num_stations = mem->num_stations;
+ mem_conf->rx_mem_block_num = mem->rx_block_num;
+ mem_conf->tx_min_mem_block_num = mem->tx_min_block_num;
+ mem_conf->num_ssid_profiles = mem->ssid_profiles;
+ mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
+ mem_conf->dyn_mem_enable = mem->dynamic_memory;
+ mem_conf->tx_free_req = mem->min_req_tx_blocks;
+ mem_conf->rx_free_req = mem->min_req_rx_blocks;
+ mem_conf->tx_min = mem->tx_min;
+
+ ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
+ sizeof(*mem_conf));
+ if (ret < 0) {
+ wl1271_warning("wl1271 mem config failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(mem_conf);
+ return ret;
+}
+
+int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap)
+{
+ struct wl1271_acx_host_config_bitmap *bitmap_conf;
+ int ret;
+
+ bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL);
+ if (!bitmap_conf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap);
+
+ ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP,
+ bitmap_conf, sizeof(*bitmap_conf));
+ if (ret < 0) {
+ wl1271_warning("wl1271 bitmap config opt failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(bitmap_conf);
+
+ return ret;
+}
+
+int wl1271_acx_init_mem_config(struct wl1271 *wl)
+{
+ int ret;
wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map),
GFP_KERNEL);
@@ -1233,6 +1344,7 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
struct wl1271_acx_ht_capabilities *acx;
u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
int ret = 0;
+ u32 ht_capabilites = 0;
wl1271_debug(DEBUG_ACX, "acx ht capabilities setting");
@@ -1244,27 +1356,26 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
/* Allow HT Operation ? */
if (allow_ht_operation) {
- acx->ht_capabilites =
+ ht_capabilites =
WL1271_ACX_FW_CAP_HT_OPERATION;
if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD)
- acx->ht_capabilites |=
+ ht_capabilites |=
WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
- acx->ht_capabilites |=
+ ht_capabilites |=
WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
- acx->ht_capabilites |=
+ ht_capabilites |=
WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;
/* get data from A-MPDU parameters field */
acx->ampdu_max_length = ht_cap->ampdu_factor;
acx->ampdu_min_spacing = ht_cap->ampdu_density;
-
- memcpy(acx->mac_address, mac_address, ETH_ALEN);
- } else { /* HT operations are not allowed */
- acx->ht_capabilites = 0;
}
+ memcpy(acx->mac_address, mac_address, ETH_ALEN);
+ acx->ht_capabilites = cpu_to_le32(ht_capabilites);
+
ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx ht capabilities setting failed: %d", ret);
@@ -1293,7 +1404,8 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
acx->ht_protection =
(u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
acx->rifs_mode = 0;
- acx->gf_protection = 0;
+ acx->gf_protection =
+ !!(ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
acx->ht_tx_burst_limit = 0;
acx->dual_cts_protection = 0;
@@ -1309,6 +1421,91 @@ out:
return ret;
}
+/* Configure BA session initiator/receiver parameters setting in the FW. */
+int wl1271_acx_set_ba_session(struct wl1271 *wl,
+ enum ieee80211_back_parties direction,
+ u8 tid_index, u8 policy)
+{
+ struct wl1271_acx_ba_session_policy *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx ba session setting");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* ANY role */
+ acx->role_id = 0xff;
+ acx->tid = tid_index;
+ acx->enable = policy;
+ acx->ba_direction = direction;
+
+ switch (direction) {
+ case WLAN_BACK_INITIATOR:
+ acx->win_size = wl->conf.ht.tx_ba_win_size;
+ acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
+ break;
+ case WLAN_BACK_RECIPIENT:
+ acx->win_size = RX_BA_WIN_SIZE;
+ acx->inactivity_timeout = 0;
+ break;
+ default:
+ wl1271_error("Incorrect acx command id=%x\n", direction);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = wl1271_cmd_configure(wl,
+ ACX_BA_SESSION_POLICY_CFG,
+ acx,
+ sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx ba session setting failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+/* setup BA session receiver setting in the FW. */
+int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
+ bool enable)
+{
+ struct wl1271_acx_ba_receiver_setup *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx ba receiver session setting");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Single link for now */
+ acx->link_id = 1;
+ acx->tid = tid_index;
+ acx->enable = enable;
+ acx->win_size = 0;
+ acx->ssn = ssn;
+
+ ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx,
+ sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx ba receiver session failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime)
{
struct wl1271_acx_fw_tsf_information *tsf_info;
@@ -1334,3 +1531,146 @@ out:
kfree(tsf_info);
return ret;
}
+
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
+{
+ struct wl1271_acx_ap_max_tx_retry *acx = NULL;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx ap max tx retry");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+
+ acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
+
+ ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx ap max tx retry failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_sta_max_tx_retry(struct wl1271 *wl)
+{
+ struct wl1271_acx_sta_max_tx_retry *acx = NULL;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx sta max tx retry");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+
+ acx->max_tx_retry = wl->conf.tx.max_tx_retries;
+
+ ret = wl1271_cmd_configure(wl, ACX_CONS_TX_FAILURE, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx sta max tx retry failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_config_ps(struct wl1271 *wl)
+{
+ struct wl1271_acx_config_ps *config_ps;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx config ps");
+
+ config_ps = kzalloc(sizeof(*config_ps), GFP_KERNEL);
+ if (!config_ps) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
+ config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
+ config_ps->null_data_rate = cpu_to_le32(wl->basic_rate);
+
+ ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
+ sizeof(*config_ps));
+
+ if (ret < 0) {
+ wl1271_warning("acx config ps failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(config_ps);
+ return ret;
+}
+
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
+{
+ struct wl1271_acx_inconnection_sta *acx = NULL;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx set inconnaction sta %pM", addr);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+
+ memcpy(acx->addr, addr, ETH_ALEN);
+
+ ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST,
+ acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx set inconnaction sta failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_fm_coex(struct wl1271 *wl)
+{
+ struct wl1271_acx_fm_coex *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx fm coex setting");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->enable = wl->conf.fm_coex.enable;
+ acx->swallow_period = wl->conf.fm_coex.swallow_period;
+ acx->n_divider_fref_set_1 = wl->conf.fm_coex.n_divider_fref_set_1;
+ acx->n_divider_fref_set_2 = wl->conf.fm_coex.n_divider_fref_set_2;
+ acx->m_divider_fref_set_1 =
+ cpu_to_le16(wl->conf.fm_coex.m_divider_fref_set_1);
+ acx->m_divider_fref_set_2 =
+ cpu_to_le16(wl->conf.fm_coex.m_divider_fref_set_2);
+ acx->coex_pll_stabilization_time =
+ cpu_to_le32(wl->conf.fm_coex.coex_pll_stabilization_time);
+ acx->ldo_stabilization_time =
+ cpu_to_le16(wl->conf.fm_coex.ldo_stabilization_time);
+ acx->fm_disturbed_band_margin =
+ wl->conf.fm_coex.fm_disturbed_band_margin;
+ acx->swallow_clk_diff = wl->conf.fm_coex.swallow_clk_diff;
+
+ ret = wl1271_cmd_configure(wl, ACX_FM_COEX_CFG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx fm coex setting failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
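
All of the ACX helpers added to this file share one calling convention: heap-allocate the command struct (the firmware interface needs the acx_header prefix), fill it from wl->conf, push it with wl1271_cmd_configure(), and free it on every exit path. A generic sketch of that convention (EXAMPLE_ACX_CMD and struct wl1271_acx_example are placeholders, not real firmware definitions):

struct wl1271_acx_example {
        struct acx_header header;

        u8 value;
        u8 padding[3];                  /* keep the payload 32-bit aligned */
} __packed;

static int wl1271_acx_example_cfg(struct wl1271 *wl, u8 value)
{
        struct wl1271_acx_example *acx;
        int ret;

        acx = kzalloc(sizeof(*acx), GFP_KERNEL);
        if (!acx)
                return -ENOMEM;

        acx->value = value;

        ret = wl1271_cmd_configure(wl, EXAMPLE_ACX_CMD, acx, sizeof(*acx));
        if (ret < 0)
                wl1271_warning("example acx failed: %d", ret);

        kfree(acx);
        return ret;
}
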
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index 7bd8e4db4a7..2dde0346e95 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -133,7 +133,6 @@ enum {
#define DEFAULT_UCAST_PRIORITY 0
#define DEFAULT_RX_Q_PRIORITY 0
-#define DEFAULT_NUM_STATIONS 1
#define DEFAULT_RXQ_PRIORITY 0 /* low 0 .. 15 high */
#define DEFAULT_RXQ_TYPE 0x07 /* All frames, Data/Ctrl/Mgmt */
#define TRACE_BUFFER_MAX_SIZE 256
@@ -747,13 +746,23 @@ struct acx_rate_class {
#define ACX_TX_BASIC_RATE 0
#define ACX_TX_AP_FULL_RATE 1
#define ACX_TX_RATE_POLICY_CNT 2
-struct acx_rate_policy {
+struct acx_sta_rate_policy {
struct acx_header header;
__le32 rate_class_cnt;
struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
} __packed;
+
+#define ACX_TX_AP_MODE_MGMT_RATE 4
+#define ACX_TX_AP_MODE_BCST_RATE 5
+struct acx_ap_rate_policy {
+ struct acx_header header;
+
+ __le32 rate_policy_idx;
+ struct acx_rate_class rate_policy;
+} __packed;
+
struct acx_ac_cfg {
struct acx_header header;
u8 ac;
@@ -787,12 +796,9 @@ struct acx_tx_config_options {
__le16 tx_compl_threshold; /* number of packets */
} __packed;
-#define ACX_RX_MEM_BLOCKS 70
-#define ACX_TX_MIN_MEM_BLOCKS 40
#define ACX_TX_DESCRIPTORS 32
-#define ACX_NUM_SSID_PROFILES 1
-struct wl1271_acx_config_memory {
+struct wl1271_acx_ap_config_memory {
struct acx_header header;
u8 rx_mem_block_num;
@@ -802,6 +808,20 @@ struct wl1271_acx_config_memory {
__le32 total_tx_descriptors;
} __packed;
+struct wl1271_acx_sta_config_memory {
+ struct acx_header header;
+
+ u8 rx_mem_block_num;
+ u8 tx_min_mem_block_num;
+ u8 num_stations;
+ u8 num_ssid_profiles;
+ __le32 total_tx_descriptors;
+ u8 dyn_mem_enable;
+ u8 tx_free_req;
+ u8 rx_free_req;
+ u8 tx_min;
+} __packed;
+
struct wl1271_acx_mem_map {
struct acx_header header;
@@ -919,6 +939,16 @@ struct wl1271_acx_keep_alive_config {
u8 padding;
} __packed;
+#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0)
+#define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1)
+#define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3)
+
+struct wl1271_acx_host_config_bitmap {
+ struct acx_header header;
+
+ __le32 host_cfg_bitmap;
+} __packed;
+
enum {
WL1271_ACX_TRIG_TYPE_LEVEL = 0,
WL1271_ACX_TRIG_TYPE_EDGE,
@@ -1051,6 +1081,59 @@ struct wl1271_acx_ht_information {
u8 padding[3];
} __packed;
+#define RX_BA_WIN_SIZE 8
+
+struct wl1271_acx_ba_session_policy {
+ struct acx_header header;
+ /*
+ * Specifies role Id, Range 0-7, 0xFF means ANY role.
+ * Future use. For now this field is irrelevant
+ */
+ u8 role_id;
+ /*
+ * Specifies Link Id, Range 0-31, 0xFF means ANY Link Id.
+ * Not applicable if Role Id is set to ANY.
+ */
+ u8 link_id;
+
+ u8 tid;
+
+ u8 enable;
+
+ /* Window size in number of packets */
+ u16 win_size;
+
+ /*
+ * As initiator: inactivity timeout in time units (TU) of 1024us.
+ * As receiver: reserved
+ */
+ u16 inactivity_timeout;
+
+ /* Initiator = 1/Receiver = 0 */
+ u8 ba_direction;
+
+ u8 padding[3];
+} __packed;
+
+struct wl1271_acx_ba_receiver_setup {
+ struct acx_header header;
+
+ /* Specifies Link Id, Range 0-31, 0xFF means ANY Link Id */
+ u8 link_id;
+
+ u8 tid;
+
+ u8 enable;
+
+ u8 padding[1];
+
+ /* Window size in number of packets */
+ u16 win_size;
+
+ /* BA session starting sequence number. RANGE 0-FFF */
+ u16 ssn;
+} __packed;
+
struct wl1271_acx_fw_tsf_information {
struct acx_header header;
@@ -1062,6 +1145,99 @@ struct wl1271_acx_fw_tsf_information {
u8 padding[3];
} __packed;
+struct wl1271_acx_ap_max_tx_retry {
+ struct acx_header header;
+
+ /*
+ * the number of frame transmission failures before
+ * issuing the aging event.
+ */
+ __le16 max_tx_retry;
+ u8 padding_1[2];
+} __packed;
+
+struct wl1271_acx_sta_max_tx_retry {
+ struct acx_header header;
+
+ u8 max_tx_retry;
+ u8 padding_1[3];
+} __packed;
+
+struct wl1271_acx_config_ps {
+ struct acx_header header;
+
+ u8 exit_retries;
+ u8 enter_retries;
+ u8 padding[2];
+ __le32 null_data_rate;
+} __packed;
+
+struct wl1271_acx_inconnection_sta {
+ struct acx_header header;
+
+ u8 addr[ETH_ALEN];
+ u8 padding1[2];
+} __packed;
+
+/*
+ * ACX_FM_COEX_CFG
+ * set the FM co-existence parameters.
+ */
+struct wl1271_acx_fm_coex {
+ struct acx_header header;
+ /* enable(1) / disable(0) the FM Coex feature */
+ u8 enable;
+ /*
+ * Swallow period used in COEX PLL swallowing mechanism.
+ * 0xFF = use FW default
+ */
+ u8 swallow_period;
+ /*
+ * The N divider used in COEX PLL swallowing mechanism for Fref of
+ * 38.4/19.2 Mhz. 0xFF = use FW default
+ */
+ u8 n_divider_fref_set_1;
+ /*
+ * The N divider used in COEX PLL swallowing mechanism for Fref of
+ * 26/52 Mhz. 0xFF = use FW default
+ */
+ u8 n_divider_fref_set_2;
+ /*
+ * The M divider used in COEX PLL swallowing mechanism for Fref of
+ * 38.4/19.2 Mhz. 0xFFFF = use FW default
+ */
+ __le16 m_divider_fref_set_1;
+ /*
+ * The M divider used in COEX PLL swallowing mechanism for Fref of
+ * 26/52 Mhz. 0xFFFF = use FW default
+ */
+ __le16 m_divider_fref_set_2;
+ /*
+ * The time duration in uSec required for COEX PLL to stabilize.
+ * 0xFFFFFFFF = use FW default
+ */
+ __le32 coex_pll_stabilization_time;
+ /*
+ * The time duration in uSec required for LDO to stabilize.
+ * 0xFFFFFFFF = use FW default
+ */
+ __le16 ldo_stabilization_time;
+ /*
+ * The disturbed frequency band margin around the disturbed frequency
+ * center (single sided).
+ * For example, if 2 is configured, the following channels will be
+ * considered disturbed channel:
+ * 80 +- 0.1 MHz, 91 +- 0.1 MHz, 98 +- 0.1 MHz, 102 +- 0.1 MHz
+ * 0xFF = use FW default
+ */
+ u8 fm_disturbed_band_margin;
+ /*
+ * The swallow clock difference of the swallowing mechanism.
+ * 0xFF = use FW default
+ */
+ u8 swallow_clk_diff;
+} __packed;
+
enum {
ACX_WAKE_UP_CONDITIONS = 0x0002,
ACX_MEM_CFG = 0x0003,
@@ -1091,6 +1267,7 @@ enum {
ACX_BCN_DTIM_OPTIONS = 0x0031,
ACX_SG_ENABLE = 0x0032,
ACX_SG_CFG = 0x0033,
+ ACX_FM_COEX_CFG = 0x0034,
ACX_BEACON_FILTER_TABLE = 0x0038,
ACX_ARP_IP_FILTER = 0x0039,
ACX_ROAMING_STATISTICS_TBL = 0x003B,
@@ -1113,22 +1290,24 @@ enum {
ACX_RSSI_SNR_WEIGHTS = 0x0052,
ACX_KEEP_ALIVE_MODE = 0x0053,
ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
- ACX_BA_SESSION_RESPONDER_POLICY = 0x0055,
- ACX_BA_SESSION_INITIATOR_POLICY = 0x0056,
+ ACX_BA_SESSION_POLICY_CFG = 0x0055,
+ ACX_BA_SESSION_RX_SETUP = 0x0056,
ACX_PEER_HT_CAP = 0x0057,
ACX_HT_BSS_OPERATION = 0x0058,
ACX_COEX_ACTIVITY = 0x0059,
ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
+ ACX_GEN_FW_CMD = 0x0070,
+ ACX_HOST_IF_CFG_BITMAP = 0x0071,
+ ACX_MAX_TX_FAILURE = 0x0072,
+ ACX_UPDATE_INCONNECTION_STA_LIST = 0x0073,
DOT11_RX_MSDU_LIFE_TIME = 0x1004,
DOT11_CUR_TX_PWR = 0x100D,
DOT11_RX_DOT11_MODE = 0x1012,
DOT11_RTS_THRESHOLD = 0x1013,
DOT11_GROUP_ADDRESS_TBL = 0x1014,
ACX_PM_CONFIG = 0x1016,
-
- MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
-
- MAX_IE = 0xFFFF
+ ACX_CONFIG_PS = 0x1017,
+ ACX_CONFIG_HANGOVER = 0x1018,
};
@@ -1160,7 +1339,9 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
int wl1271_acx_cts_protect(struct wl1271 *wl,
enum acx_ctsprotect_type ctsprotect);
int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_rate_policies(struct wl1271 *wl);
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
+int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
+ u8 idx);
int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifsn, u16 txop);
int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
@@ -1168,8 +1349,10 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
u32 apsd_conf0, u32 apsd_conf1);
int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold);
int wl1271_acx_tx_config_options(struct wl1271 *wl);
-int wl1271_acx_mem_cfg(struct wl1271 *wl);
+int wl1271_acx_ap_mem_cfg(struct wl1271 *wl);
+int wl1271_acx_sta_mem_cfg(struct wl1271 *wl);
int wl1271_acx_init_mem_config(struct wl1271 *wl);
+int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
int wl1271_acx_smart_reflex(struct wl1271 *wl);
int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
@@ -1185,6 +1368,16 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
bool allow_ht_operation);
int wl1271_acx_set_ht_information(struct wl1271 *wl,
u16 ht_operation_mode);
+int wl1271_acx_set_ba_session(struct wl1271 *wl,
+ enum ieee80211_back_parties direction,
+ u8 tid_index, u8 policy);
+int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
+ bool enable);
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl);
+int wl1271_acx_sta_max_tx_retry(struct wl1271 *wl);
+int wl1271_acx_config_ps(struct wl1271 *wl);
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
+int wl1271_acx_fm_coex(struct wl1271 *wl);
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index 4df04f84d7f..d263ebb6f97 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -22,12 +22,14 @@
*/
#include <linux/slab.h>
+#include <linux/wl12xx.h>
#include "acx.h"
#include "reg.h"
#include "boot.h"
#include "io.h"
#include "event.h"
+#include "rx.h"
static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
[PART_DOWN] = {
@@ -100,6 +102,22 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
}
+static void wl1271_parse_fw_ver(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
+ &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
+ &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
+ &wl->chip.fw_ver[4]);
+
+ if (ret != 5) {
+ wl1271_warning("fw version incorrect value");
+ memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
+ return;
+ }
+}
+
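wl1271_parse_fw_ver() above expects five dot-separated decimal fields after a fixed four-character prefix in fw_ver_str. A standalone illustration of that parse; the "Rev 6.1.0.50.350" string is an invented example of the assumed format:

	#include <stdio.h>

	int main(void)
	{
		unsigned int v[5];
		const char *fw_ver_str = "Rev 6.1.0.50.350";	/* hypothetical */

		/* skip the 4-char prefix, then read the five numeric fields */
		if (sscanf(fw_ver_str + 4, "%u.%u.%u.%u.%u",
			   &v[0], &v[1], &v[2], &v[3], &v[4]) == 5)
			printf("%u.%u.%u.%u.%u\n", v[0], v[1], v[2], v[3], v[4]);
		return 0;
	}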
static void wl1271_boot_fw_version(struct wl1271 *wl)
{
struct wl1271_static_data static_data;
@@ -107,11 +125,13 @@ static void wl1271_boot_fw_version(struct wl1271 *wl)
wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
false);
- strncpy(wl->chip.fw_ver, static_data.fw_version,
- sizeof(wl->chip.fw_ver));
+ strncpy(wl->chip.fw_ver_str, static_data.fw_version,
+ sizeof(wl->chip.fw_ver_str));
/* make sure the string is NULL-terminated */
- wl->chip.fw_ver[sizeof(wl->chip.fw_ver) - 1] = '\0';
+ wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
+
+ wl1271_parse_fw_ver(wl);
}
static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
@@ -224,31 +244,57 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
if (wl->nvs == NULL)
return -ENODEV;
- /*
- * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz band
- * configurations) can be removed when those NVS files stop floating
- * around.
- */
- if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
- wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
- if (wl->nvs->general_params.dual_mode_select)
- wl->enable_11a = true;
- }
+ if (wl->chip.id == CHIP_ID_1283_PG20) {
+ struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
+
+ if (wl->nvs_len == sizeof(struct wl128x_nvs_file)) {
+ if (nvs->general_params.dual_mode_select)
+ wl->enable_11a = true;
+ } else {
+ wl1271_error("nvs size is not as expected: %zu != %zu",
+ wl->nvs_len,
+ sizeof(struct wl128x_nvs_file));
+ kfree(wl->nvs);
+ wl->nvs = NULL;
+ wl->nvs_len = 0;
+ return -EILSEQ;
+ }
- if (wl->nvs_len != sizeof(struct wl1271_nvs_file) &&
- (wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
- wl->enable_11a)) {
- wl1271_error("nvs size is not as expected: %zu != %zu",
- wl->nvs_len, sizeof(struct wl1271_nvs_file));
- kfree(wl->nvs);
- wl->nvs = NULL;
- wl->nvs_len = 0;
- return -EILSEQ;
- }
+ /* only the first part of the NVS needs to be uploaded */
+ nvs_len = sizeof(nvs->nvs);
+ nvs_ptr = (u8 *)nvs->nvs;
- /* only the first part of the NVS needs to be uploaded */
- nvs_len = sizeof(wl->nvs->nvs);
- nvs_ptr = (u8 *)wl->nvs->nvs;
+ } else {
+ struct wl1271_nvs_file *nvs =
+ (struct wl1271_nvs_file *)wl->nvs;
+ /*
+ * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz
+ * band configurations) can be removed when those NVS files stop
+ * floating around.
+ */
+ if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
+ wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
+ /* for now 11a is unsupported in AP mode */
+ if (wl->bss_type != BSS_TYPE_AP_BSS &&
+ nvs->general_params.dual_mode_select)
+ wl->enable_11a = true;
+ }
+
+ if (wl->nvs_len != sizeof(struct wl1271_nvs_file) &&
+ (wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
+ wl->enable_11a)) {
+ wl1271_error("nvs size is not as expected: %zu != %zu",
+ wl->nvs_len, sizeof(struct wl1271_nvs_file));
+ kfree(wl->nvs);
+ wl->nvs = NULL;
+ wl->nvs_len = 0;
+ return -EILSEQ;
+ }
+
+ /* only the first part of the NVS needs to be uploaded */
+ nvs_len = sizeof(nvs->nvs);
+ nvs_ptr = (u8 *) nvs->nvs;
+ }
/* update current MAC address to NVS */
nvs_ptr[11] = wl->mac_addr[0];
@@ -298,10 +344,13 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
/*
* We've reached the first zero length, the first NVS table
* is located at an aligned offset which is at least 7 bytes further.
+	 * NOTE: The wl->nvs->nvs element must be first; to simplify
+	 * the casting, we assume it is at the beginning of the
+	 * wl->nvs structure.
*/
- nvs_ptr = (u8 *)wl->nvs->nvs +
- ALIGN(nvs_ptr - (u8 *)wl->nvs->nvs + 7, 4);
- nvs_len -= nvs_ptr - (u8 *)wl->nvs->nvs;
+ nvs_ptr = (u8 *)wl->nvs +
+ ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
+ nvs_len -= nvs_ptr - (u8 *)wl->nvs;
/* Now we must set the partition correctly */
wl1271_set_partition(wl, &part_table[PART_WORK]);
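The pointer arithmetic in the hunk above skips at least 7 bytes past the zero-length marker and rounds up to a 4-byte boundary, now relative to the start of wl->nvs. A worked example with an invented offset and the usual ALIGN() definition:

	#include <stdio.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	int main(void)
	{
		unsigned long zero_pos = 11;	/* made-up offset of the zero-length entry */

		/* ALIGN(11 + 7, 4) == ALIGN(18, 4) == 20 */
		printf("next NVS table at offset %lu\n", ALIGN(zero_pos + 7, 4));
		return 0;
	}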
@@ -429,7 +478,14 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
DISCONNECT_EVENT_COMPLETE_ID |
RSSI_SNR_TRIGGER_0_EVENT_ID |
PSPOLL_DELIVERY_FAILURE_EVENT_ID |
- SOFT_GEMINI_SENSE_EVENT_ID;
+ SOFT_GEMINI_SENSE_EVENT_ID |
+ MAX_TX_RETRY_EVENT_ID;
+
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID |
+ INACTIVE_STA_EVENT_ID;
+ else
+ wl->event_mask |= DUMMY_PACKET_EVENT_ID;
ret = wl1271_event_unmask(wl);
if (ret < 0) {
@@ -464,26 +520,164 @@ static void wl1271_boot_hw_version(struct wl1271 *wl)
fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
wl->hw_pg_ver = (s8)fuse;
+
+ if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
+ wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
}
-/* uploads NVS and firmware */
-int wl1271_load_firmware(struct wl1271 *wl)
+static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
{
- int ret = 0;
- u32 tmp, clk, pause;
+ u16 spare_reg;
+
+ /* Mask bits [2] & [8:4] in the sys_clk_cfg register */
+ spare_reg = wl1271_top_reg_read(wl, WL_SPARE_REG);
+ if (spare_reg == 0xFFFF)
+ return -EFAULT;
+ spare_reg |= (BIT(3) | BIT(5) | BIT(6));
+ wl1271_top_reg_write(wl, WL_SPARE_REG, spare_reg);
+
+ /* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */
+ wl1271_top_reg_write(wl, SYS_CLK_CFG_REG,
+ WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);
+
+ /* Delay execution for 15msec, to let the HW settle */
+ mdelay(15);
+
+ return 0;
+}
+
+static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
+{
+ u16 tcxo_detection;
+
+ tcxo_detection = wl1271_top_reg_read(wl, TCXO_CLK_DETECT_REG);
+ if (tcxo_detection & TCXO_DET_FAILED)
+ return false;
+
+ return true;
+}
+
+static bool wl128x_is_fref_valid(struct wl1271 *wl)
+{
+ u16 fref_detection;
+
+ fref_detection = wl1271_top_reg_read(wl, FREF_CLK_DETECT_REG);
+ if (fref_detection & FREF_CLK_DETECT_FAIL)
+ return false;
+
+ return true;
+}
+
+static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl)
+{
+ wl1271_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
+ wl1271_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
+ wl1271_top_reg_write(wl, MCS_PLL_CONFIG_REG, MCS_PLL_CONFIG_REG_VAL);
+
+ return 0;
+}
+
+static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
+{
+ u16 spare_reg;
+ u16 pll_config;
+ u8 input_freq;
+
+ /* Mask bits [3:1] in the sys_clk_cfg register */
+ spare_reg = wl1271_top_reg_read(wl, WL_SPARE_REG);
+ if (spare_reg == 0xFFFF)
+ return -EFAULT;
+ spare_reg |= BIT(2);
+ wl1271_top_reg_write(wl, WL_SPARE_REG, spare_reg);
+
+ /* Handle special cases of the TCXO clock */
+ if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
+ wl->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
+ return wl128x_manually_configure_mcs_pll(wl);
+
+ /* Set the input frequency according to the selected clock source */
+ input_freq = (clk & 1) + 1;
+
+ pll_config = wl1271_top_reg_read(wl, MCS_PLL_CONFIG_REG);
+ if (pll_config == 0xFFFF)
+ return -EFAULT;
+ pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT);
+ pll_config |= MCS_PLL_ENABLE_HP;
+ wl1271_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);
+
+ return 0;
+}
+
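The `input_freq = (clk & 1) + 1` line above leans on the clock enum ordering (conf.h in this patch has CONF_REF_CLK_19_2_E = 0, 26 = 1, 38.4 = 2, 52 = 3): even values select the 19.2/38.4 MHz Fref family, odd values the 26/52 MHz family. A one-liner spelling that out (helper name invented, u8 as in the surrounding code):

	static u8 mcs_pll_input_freq(int selected_clock)
	{
		/* 19.2/38.4 MHz clocks -> 1, 26/52 MHz clocks -> 2 */
		return (selected_clock & 1) + 1;
	}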
+/*
+ * WL128x has two clocks input - TCXO and FREF.
+ * WL128x has two clock inputs - TCXO and FREF.
+ * between the GPS and the cellular modem.
+ * In cases where TCXO is 32.736MHz or 16.368MHz, the FREF will be used
+ * as the WLAN/BT main clock.
+ */
+static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
+{
+ u16 sys_clk_cfg;
+
+ /* For XTAL-only modes, FREF will be used after switching from TCXO */
+ if (wl->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
+ wl->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
+ if (!wl128x_switch_tcxo_to_fref(wl))
+ return -EINVAL;
+ goto fref_clk;
+ }
+
+ /* Query the HW, to determine which clock source we should use */
+ sys_clk_cfg = wl1271_top_reg_read(wl, SYS_CLK_CFG_REG);
+ if (sys_clk_cfg == 0xFFFF)
+ return -EINVAL;
+ if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF)
+ goto fref_clk;
+
+ /* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */
+ if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
+ wl->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
+ if (!wl128x_switch_tcxo_to_fref(wl))
+ return -EINVAL;
+ goto fref_clk;
+ }
+
+ /* TCXO clock is selected */
+ if (!wl128x_is_tcxo_valid(wl))
+ return -EINVAL;
+ *selected_clock = wl->tcxo_clock;
+ goto config_mcs_pll;
+
+fref_clk:
+ /* FREF clock is selected */
+ if (!wl128x_is_fref_valid(wl))
+ return -EINVAL;
+ *selected_clock = wl->ref_clock;
+
+config_mcs_pll:
+ return wl128x_configure_mcs_pll(wl, *selected_clock);
+}
+
+static int wl127x_boot_clk(struct wl1271 *wl)
+{
+ u32 pause;
+ u32 clk;
wl1271_boot_hw_version(wl);
- if (wl->ref_clock == 0 || wl->ref_clock == 2 || wl->ref_clock == 4)
+ if (wl->ref_clock == CONF_REF_CLK_19_2_E ||
+ wl->ref_clock == CONF_REF_CLK_38_4_E ||
+ wl->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
/* ref clk: 19.2/38.4/38.4-XTAL */
clk = 0x3;
- else if (wl->ref_clock == 1 || wl->ref_clock == 3)
+ else if (wl->ref_clock == CONF_REF_CLK_26_E ||
+ wl->ref_clock == CONF_REF_CLK_52_E)
/* ref clk: 26/52 */
clk = 0x5;
else
return -EINVAL;
- if (wl->ref_clock != 0) {
+ if (wl->ref_clock != CONF_REF_CLK_19_2_E) {
u16 val;
/* Set clock type (open drain) */
val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
@@ -513,6 +707,26 @@ int wl1271_load_firmware(struct wl1271 *wl)
pause |= WU_COUNTER_PAUSE_VAL;
wl1271_write32(wl, WU_COUNTER_PAUSE, pause);
+ return 0;
+}
+
+/* uploads NVS and firmware */
+int wl1271_load_firmware(struct wl1271 *wl)
+{
+ int ret = 0;
+ u32 tmp, clk;
+ int selected_clock = -1;
+
+ if (wl->chip.id == CHIP_ID_1283_PG20) {
+ ret = wl128x_boot_clk(wl, &selected_clock);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = wl127x_boot_clk(wl);
+ if (ret < 0)
+ goto out;
+ }
+
/* Continue the ELP wake up sequence */
wl1271_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
udelay(500);
@@ -528,7 +742,12 @@ int wl1271_load_firmware(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
- clk |= (wl->ref_clock << 1) << 4;
+ if (wl->chip.id == CHIP_ID_1283_PG20) {
+ clk |= ((selected_clock & 0x3) << 1) << 4;
+ } else {
+ clk |= (wl->ref_clock << 1) << 4;
+ }
+
wl1271_write32(wl, DRPW_SCRATCH_START, clk);
wl1271_set_partition(wl, &part_table[PART_WORK]);
@@ -558,16 +777,12 @@ int wl1271_load_firmware(struct wl1271 *wl)
/* 6. read the EEPROM parameters */
tmp = wl1271_read32(wl, SCR_PAD2);
- ret = wl1271_boot_write_irq_polarity(wl);
- if (ret < 0)
- goto out;
-
- wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
- WL1271_ACX_ALL_EVENTS_VECTOR);
-
/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
* to upload_fw) */
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ wl1271_top_reg_write(wl, SDIO_IO_DS, wl->conf.hci_io_ds);
+
ret = wl1271_boot_upload_firmware(wl);
if (ret < 0)
goto out;
@@ -591,12 +806,18 @@ int wl1271_boot(struct wl1271 *wl)
if (ret < 0)
goto out;
+ ret = wl1271_boot_write_irq_polarity(wl);
+ if (ret < 0)
+ goto out;
+
+ wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
+ WL1271_ACX_ALL_EVENTS_VECTOR);
+
/* Enable firmware interrupts now */
wl1271_boot_enable_interrupts(wl);
/* set the wl1271 default filters */
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+ wl1271_set_default_filters(wl);
wl1271_event_mbox_config(wl);
diff --git a/drivers/net/wireless/wl12xx/boot.h b/drivers/net/wireless/wl12xx/boot.h
index d67dcffa31e..e8f8255bbab 100644
--- a/drivers/net/wireless/wl12xx/boot.h
+++ b/drivers/net/wireless/wl12xx/boot.h
@@ -59,6 +59,11 @@ struct wl1271_static_data {
#define PG_VER_MASK 0x3c
#define PG_VER_OFFSET 2
+#define PG_MAJOR_VER_MASK 0x3
+#define PG_MAJOR_VER_OFFSET 0x0
+#define PG_MINOR_VER_MASK 0xc
+#define PG_MINOR_VER_OFFSET 0x2
+
#define CMD_MBOX_ADDRESS 0x407B4
#define POLARITY_LOW BIT(1)
@@ -69,4 +74,56 @@ struct wl1271_static_data {
#define FREF_CLK_POLARITY_BITS 0xfffff8ff
#define CLK_REQ_OUTN_SEL 0x700
+/* PLL configuration algorithm for wl128x */
+#define SYS_CLK_CFG_REG 0x2200
+/* Bit[0] - 0-TCXO, 1-FREF */
+#define MCS_PLL_CLK_SEL_FREF BIT(0)
+/* Bit[3:2] - 01-TCXO, 10-FREF */
+#define WL_CLK_REQ_TYPE_FREF BIT(3)
+#define WL_CLK_REQ_TYPE_PG2 (BIT(3) | BIT(2))
+/* Bit[4] - 0-TCXO, 1-FREF */
+#define PRCM_CM_EN_MUX_WLAN_FREF BIT(4)
+
+#define TCXO_ILOAD_INT_REG 0x2264
+#define TCXO_CLK_DETECT_REG 0x2266
+
+#define TCXO_DET_FAILED BIT(4)
+
+#define FREF_ILOAD_INT_REG 0x2084
+#define FREF_CLK_DETECT_REG 0x2086
+#define FREF_CLK_DETECT_FAIL BIT(4)
+
+/* Use this reg for masking during driver access */
+#define WL_SPARE_REG 0x2320
+#define WL_SPARE_VAL BIT(2)
+/* Bit[6:5:3] - mask wl write SYS_CLK_CFG[8:5:2:4] */
+#define WL_SPARE_MASK_8526 (BIT(6) | BIT(5) | BIT(3))
+
+#define PLL_LOCK_COUNTERS_REG 0xD8C
+#define PLL_LOCK_COUNTERS_COEX 0x0F
+#define PLL_LOCK_COUNTERS_MCS 0xF0
+#define MCS_PLL_OVERRIDE_REG 0xD90
+#define MCS_PLL_CONFIG_REG 0xD92
+#define MCS_SEL_IN_FREQ_MASK 0x0070
+#define MCS_SEL_IN_FREQ_SHIFT 4
+#define MCS_PLL_CONFIG_REG_VAL 0x73
+#define MCS_PLL_ENABLE_HP (BIT(0) | BIT(1))
+
+#define MCS_PLL_M_REG 0xD94
+#define MCS_PLL_N_REG 0xD96
+#define MCS_PLL_M_REG_VAL 0xC8
+#define MCS_PLL_N_REG_VAL 0x07
+
+#define SDIO_IO_DS 0xd14
+
+/* SDIO/wSPI DS configuration values */
+enum {
+ HCI_IO_DS_8MA = 0,
+ HCI_IO_DS_4MA = 1, /* default */
+ HCI_IO_DS_6MA = 2,
+ HCI_IO_DS_2MA = 3,
+};
+
+/* end PLL configuration algorithm for wl128x */
+
#endif
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index 0106628aa5a..a9ffdd86f9b 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -36,6 +36,7 @@
#include "wl12xx_80211.h"
#include "cmd.h"
#include "event.h"
+#include "tx.h"
#define WL1271_CMD_FAST_POLL_COUNT 50
@@ -62,6 +63,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
cmd->status = 0;
WARN_ON(len % 4 != 0);
+ WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags));
wl1271_write(wl, wl->cmd_box_addr, buf, len, false);
@@ -108,7 +110,8 @@ out:
int wl1271_cmd_general_parms(struct wl1271 *wl)
{
struct wl1271_general_parms_cmd *gen_parms;
- struct wl1271_ini_general_params *gp = &wl->nvs->general_params;
+ struct wl1271_ini_general_params *gp =
+ &((struct wl1271_nvs_file *)wl->nvs)->general_params;
bool answer = false;
int ret;
@@ -126,6 +129,52 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
if (gp->tx_bip_fem_auto_detect)
answer = true;
+ /* Override the REF CLK from the NVS with the one from platform data */
+ gen_parms->general_params.ref_clock = wl->ref_clock;
+
+ ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
+ if (ret < 0) {
+ wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
+ goto out;
+ }
+
+ gp->tx_bip_fem_manufacturer =
+ gen_parms->general_params.tx_bip_fem_manufacturer;
+
+ wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
+ answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
+
+out:
+ kfree(gen_parms);
+ return ret;
+}
+
+int wl128x_cmd_general_parms(struct wl1271 *wl)
+{
+ struct wl128x_general_parms_cmd *gen_parms;
+ struct wl128x_ini_general_params *gp =
+ &((struct wl128x_nvs_file *)wl->nvs)->general_params;
+ bool answer = false;
+ int ret;
+
+ if (!wl->nvs)
+ return -ENODEV;
+
+ gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
+ if (!gen_parms)
+ return -ENOMEM;
+
+ gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
+
+ memcpy(&gen_parms->general_params, gp, sizeof(*gp));
+
+ if (gp->tx_bip_fem_auto_detect)
+ answer = true;
+
+ /* Replace REF and TCXO CLKs with the ones from platform data */
+ gen_parms->general_params.ref_clock = wl->ref_clock;
+ gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock;
+
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
if (ret < 0) {
wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
@@ -145,8 +194,9 @@ out:
int wl1271_cmd_radio_parms(struct wl1271 *wl)
{
+ struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs;
struct wl1271_radio_parms_cmd *radio_parms;
- struct wl1271_ini_general_params *gp = &wl->nvs->general_params;
+ struct wl1271_ini_general_params *gp = &nvs->general_params;
int ret;
if (!wl->nvs)
@@ -159,18 +209,18 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
/* 2.4GHz parameters */
- memcpy(&radio_parms->static_params_2, &wl->nvs->stat_radio_params_2,
+ memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
sizeof(struct wl1271_ini_band_params_2));
memcpy(&radio_parms->dyn_params_2,
- &wl->nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
+ &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
sizeof(struct wl1271_ini_fem_params_2));
/* 5GHz parameters */
memcpy(&radio_parms->static_params_5,
- &wl->nvs->stat_radio_params_5,
+ &nvs->stat_radio_params_5,
sizeof(struct wl1271_ini_band_params_5));
memcpy(&radio_parms->dyn_params_5,
- &wl->nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
+ &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
sizeof(struct wl1271_ini_fem_params_5));
wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
@@ -184,6 +234,50 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
return ret;
}
+int wl128x_cmd_radio_parms(struct wl1271 *wl)
+{
+ struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
+ struct wl128x_radio_parms_cmd *radio_parms;
+ struct wl128x_ini_general_params *gp = &nvs->general_params;
+ int ret;
+
+ if (!wl->nvs)
+ return -ENODEV;
+
+ radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
+ if (!radio_parms)
+ return -ENOMEM;
+
+ radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
+
+ /* 2.4GHz parameters */
+ memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
+ sizeof(struct wl128x_ini_band_params_2));
+ memcpy(&radio_parms->dyn_params_2,
+ &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
+ sizeof(struct wl128x_ini_fem_params_2));
+
+ /* 5GHz parameters */
+ memcpy(&radio_parms->static_params_5,
+ &nvs->stat_radio_params_5,
+ sizeof(struct wl128x_ini_band_params_5));
+ memcpy(&radio_parms->dyn_params_5,
+ &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
+ sizeof(struct wl128x_ini_fem_params_5));
+
+ radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options;
+
+ wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
+ radio_parms, sizeof(*radio_parms));
+
+ ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
+ if (ret < 0)
+ wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
+
+ kfree(radio_parms);
+ return ret;
+}
+
int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
{
struct wl1271_ext_radio_parms_cmd *ext_radio_parms;
@@ -221,7 +315,7 @@ int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
* Poll the mailbox event field until any of the bits in the mask is set or a
* timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
*/
-static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
{
u32 events_vector, event;
unsigned long timeout;
@@ -230,7 +324,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
do {
if (time_after(jiffies, timeout)) {
- ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
+ (int)mask);
return -ETIMEDOUT;
}
@@ -248,6 +343,19 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
return 0;
}
+static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+{
+ int ret;
+
+ ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask);
+ if (ret != 0) {
+ ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ return ret;
+ }
+
+ return 0;
+}
+
int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
{
struct wl1271_cmd_join *join;
@@ -271,6 +379,7 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
join->rx_filter_options = cpu_to_le32(wl->rx_filter);
join->bss_type = bss_type;
join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
+ join->supported_rate_set = cpu_to_le32(wl->rate_set);
if (wl->band == IEEE80211_BAND_5GHZ)
join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
@@ -288,6 +397,9 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
wl->tx_security_last_seq = 0;
wl->tx_security_seq = 0;
+ wl1271_debug(DEBUG_CMD, "cmd join: basic_rate_set=0x%x, rate_set=0x%x",
+ join->basic_rate_set, join->supported_rate_set);
+
ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
if (ret < 0) {
wl1271_error("failed to initiate cmd join");
@@ -439,7 +551,7 @@ out:
return ret;
}
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send)
+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
{
struct wl1271_cmd_ps_params *ps_params = NULL;
int ret = 0;
@@ -453,10 +565,6 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send)
}
ps_params->ps_mode = ps_mode;
- ps_params->send_null_data = send;
- ps_params->retries = wl->conf.conn.psm_entry_nullfunc_retries;
- ps_params->hang_over_period = wl->conf.conn.psm_entry_hangover_period;
- ps_params->null_data_rate = cpu_to_le32(rates);
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
sizeof(*ps_params), 0);
@@ -490,8 +598,8 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
cmd->len = cpu_to_le16(buf_len);
cmd->template_type = template_id;
cmd->enabled_rates = cpu_to_le32(rates);
- cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
- cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
+ cmd->short_retry_limit = wl->conf.tx.tmpl_short_retry_limit;
+ cmd->long_retry_limit = wl->conf.tx.tmpl_long_retry_limit;
cmd->index = index;
if (buf)
@@ -659,15 +767,15 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
/* llc layer */
memcpy(tmpl.llc_hdr, rfc1042_header, sizeof(rfc1042_header));
- tmpl.llc_type = htons(ETH_P_ARP);
+ tmpl.llc_type = cpu_to_be16(ETH_P_ARP);
/* arp header */
arp_hdr = &tmpl.arp_hdr;
- arp_hdr->ar_hrd = htons(ARPHRD_ETHER);
- arp_hdr->ar_pro = htons(ETH_P_IP);
+ arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER);
+ arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP);
arp_hdr->ar_hln = ETH_ALEN;
arp_hdr->ar_pln = 4;
- arp_hdr->ar_op = htons(ARPOP_REPLY);
+ arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
/* arp payload */
memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
@@ -702,9 +810,9 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
wl->basic_rate);
}
-int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
+int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id)
{
- struct wl1271_cmd_set_keys *cmd;
+ struct wl1271_cmd_set_sta_keys *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id);
@@ -731,11 +839,42 @@ out:
return ret;
}
-int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id)
+{
+ struct wl1271_cmd_set_ap_keys *cmd;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd set_ap_default_wep_key %d", id);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->hlid = WL1271_AP_BROADCAST_HLID;
+ cmd->key_id = id;
+ cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
+ cmd->key_action = cpu_to_le16(KEY_SET_ID);
+ cmd->key_type = KEY_WEP;
+
+ ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_warning("cmd set_ap_default_wep_key failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+
+ return ret;
+}
+
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16)
{
- struct wl1271_cmd_set_keys *cmd;
+ struct wl1271_cmd_set_sta_keys *cmd;
int ret = 0;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -788,6 +927,67 @@ out:
return ret;
}
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
+{
+ struct wl1271_cmd_set_ap_keys *cmd;
+ int ret = 0;
+ u8 lid_type;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ if (hlid == WL1271_AP_BROADCAST_HLID) {
+ if (key_type == KEY_WEP)
+ lid_type = WEP_DEFAULT_LID_TYPE;
+ else
+ lid_type = BROADCAST_LID_TYPE;
+ } else {
+ lid_type = UNICAST_LID_TYPE;
+ }
+
+ wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d"
+ " hlid: %d", (int)action, (int)id, (int)lid_type,
+ (int)key_type, (int)hlid);
+
+ cmd->lid_key_type = lid_type;
+ cmd->hlid = hlid;
+ cmd->key_action = cpu_to_le16(action);
+ cmd->key_size = key_size;
+ cmd->key_type = key_type;
+ cmd->key_id = id;
+ cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
+ cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
+
+ if (key_type == KEY_TKIP) {
+ /*
+ * We get the key in the following form:
+ * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes)
+ * but the target is expecting:
+ * TKIP - RX MIC - TX MIC
+ */
+ memcpy(cmd->key, key, 16);
+ memcpy(cmd->key + 16, key + 24, 8);
+ memcpy(cmd->key + 24, key + 16, 8);
+ } else {
+ memcpy(cmd->key, key, key_size);
+ }
+
+ wl1271_dump(DEBUG_CRYPT, "TARGET AP KEY: ", cmd, sizeof(*cmd));
+
+ ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_warning("could not set ap keys");
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ return ret;
+}
+
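The TKIP comment in wl1271_cmd_set_ap_key() above describes a fixed byte shuffle between the mac80211 key layout and the layout the target expects. The same reordering as a standalone helper; the function name is invented for illustration:

	#include <string.h>

	/* in:  key[0..15] TKIP, key[16..23] TX MIC, key[24..31] RX MIC
	 * out: out[0..15] TKIP, out[16..23] RX MIC, out[24..31] TX MIC */
	static void tkip_swap_mic(unsigned char *out, const unsigned char *key)
	{
		memcpy(out, key, 16);		/* TKIP key, unchanged */
		memcpy(out + 16, key + 24, 8);	/* RX MIC moves up */
		memcpy(out + 24, key + 16, 8);	/* TX MIC moves down */
	}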
int wl1271_cmd_disconnect(struct wl1271 *wl)
{
struct wl1271_cmd_disconnect *cmd;
@@ -850,3 +1050,180 @@ out_free:
out:
return ret;
}
+
+int wl1271_cmd_start_bss(struct wl1271 *wl)
+{
+ struct wl1271_cmd_bss_start *cmd;
+ struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd start bss");
+
+ /*
+ * FIXME: We currently do not support hidden SSID. The real SSID
+ * should be fetched from mac80211 first.
+ */
+ if (wl->ssid_len == 0) {
+ wl1271_warning("Hidden SSID currently not supported for AP");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN);
+
+ cmd->aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
+ cmd->bss_index = WL1271_AP_BSS_INDEX;
+ cmd->global_hlid = WL1271_AP_GLOBAL_HLID;
+ cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID;
+ cmd->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
+ cmd->beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->dtim_interval = bss_conf->dtim_period;
+ cmd->beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
+ cmd->channel = wl->channel;
+ cmd->ssid_len = wl->ssid_len;
+ cmd->ssid_type = SSID_TYPE_PUBLIC;
+ memcpy(cmd->ssid, wl->ssid, wl->ssid_len);
+
+ switch (wl->band) {
+ case IEEE80211_BAND_2GHZ:
+ cmd->band = RADIO_BAND_2_4GHZ;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ cmd->band = RADIO_BAND_5GHZ;
+ break;
+ default:
+ wl1271_warning("bss start - unknown band: %d", (int)wl->band);
+ cmd->band = RADIO_BAND_2_4GHZ;
+ break;
+ }
+
+ ret = wl1271_cmd_send(wl, CMD_BSS_START, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd start bss");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl1271_cmd_stop_bss(struct wl1271 *wl)
+{
+ struct wl1271_cmd_bss_start *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd stop bss");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->bss_index = WL1271_AP_BSS_INDEX;
+
+ ret = wl1271_cmd_send(wl, CMD_BSS_STOP, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd stop bss");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
+{
+ struct wl1271_cmd_add_sta *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd add sta %d", (int)hlid);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* currently we don't support UAPSD */
+ cmd->sp_len = 0;
+
+ memcpy(cmd->addr, sta->addr, ETH_ALEN);
+ cmd->bss_index = WL1271_AP_BSS_INDEX;
+ cmd->aid = sta->aid;
+ cmd->hlid = hlid;
+
+ /*
+ * FIXME: Does STA support QOS? We need to propagate this info from
+ * hostapd. Currently not that important since this is only used for
+ * sending the correct flavor of null-data packet in response to a
+ * trigger.
+ */
+ cmd->wmm = 0;
+
+ cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl,
+ sta->supp_rates[wl->band]));
+
+ wl1271_debug(DEBUG_CMD, "new sta rates: 0x%x", cmd->supported_rates);
+
+ ret = wl1271_cmd_send(wl, CMD_ADD_STA, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd add sta");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid)
+{
+ struct wl1271_cmd_remove_sta *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd remove sta %d", (int)hlid);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->hlid = hlid;
+ /* We never send a deauth, mac80211 is in charge of this */
+ cmd->reason_opcode = 0;
+ cmd->send_deauth_flag = 0;
+
+ ret = wl1271_cmd_send(wl, CMD_REMOVE_STA, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd remove sta");
+ goto out_free;
+ }
+
+ /*
+ * We are ok with a timeout here. The event is sometimes not sent
+ * due to a firmware bug.
+ */
+ wl1271_cmd_wait_for_event_or_timeout(wl, STA_REMOVE_COMPLETE_EVENT_ID);
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index 2a1d9db7ceb..5cac95d9480 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -32,14 +32,16 @@ struct acx_header;
int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
size_t res_len);
int wl1271_cmd_general_parms(struct wl1271 *wl);
+int wl128x_cmd_general_parms(struct wl1271 *wl);
int wl1271_cmd_radio_parms(struct wl1271 *wl);
+int wl128x_cmd_radio_parms(struct wl1271 *wl);
int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send);
+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
@@ -54,12 +56,20 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
int wl1271_build_qos_null_data(struct wl1271 *wl);
int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
-int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
-int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
- u8 key_size, const u8 *key, const u8 *addr,
- u32 tx_seq_32, u16 tx_seq_16);
+int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id);
+int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id);
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, const u8 *addr,
+ u32 tx_seq_32, u16 tx_seq_16);
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16);
int wl1271_cmd_disconnect(struct wl1271 *wl);
int wl1271_cmd_set_sta_state(struct wl1271 *wl);
+int wl1271_cmd_start_bss(struct wl1271 *wl);
+int wl1271_cmd_stop_bss(struct wl1271 *wl);
+int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
+int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid);
enum wl1271_commands {
CMD_INTERROGATE = 1, /*use this to read information elements*/
@@ -98,6 +108,12 @@ enum wl1271_commands {
CMD_STOP_PERIODIC_SCAN = 51,
CMD_SET_STA_STATE = 52,
+ /* AP mode commands */
+ CMD_BSS_START = 60,
+ CMD_BSS_STOP = 61,
+ CMD_ADD_STA = 62,
+ CMD_REMOVE_STA = 63,
+
NUM_COMMANDS,
MAX_COMMAND_ID = 0xFFFF,
};
@@ -126,6 +142,14 @@ enum cmd_templ {
* For CTS-to-self (FastCTS) mechanism
* for BT/WLAN coexistence (SoftGemini). */
CMD_TEMPL_ARP_RSP,
+ CMD_TEMPL_LINK_MEASUREMENT_REPORT,
+
+ /* AP-mode specific */
+ CMD_TEMPL_AP_BEACON = 13,
+ CMD_TEMPL_AP_PROBE_RESPONSE,
+ CMD_TEMPL_AP_ARP_RSP,
+ CMD_TEMPL_DEAUTH_AP,
+
CMD_TEMPL_MAX = 0xff
};
@@ -195,6 +219,7 @@ struct wl1271_cmd_join {
* ACK or CTS frames).
*/
__le32 basic_rate_set;
+ __le32 supported_rate_set;
u8 dtim_interval;
/*
* bits 0-2: This bitwise field specifies the type
@@ -257,20 +282,11 @@ struct wl1271_cmd_ps_params {
struct wl1271_cmd_header header;
u8 ps_mode; /* STATION_* */
- u8 send_null_data; /* Do we have to send NULL data packet ? */
- u8 retries; /* Number of retires for the initial NULL data packet */
-
- /*
- * TUs during which the target stays awake after switching
- * to power save mode.
- */
- u8 hang_over_period;
- __le32 null_data_rate;
+ u8 padding[3];
} __packed;
/* HW encryption keys */
#define NUM_ACCESS_CATEGORIES_COPY 4
-#define MAX_KEY_SIZE 32
enum wl1271_cmd_key_action {
KEY_ADD_OR_REPLACE = 1,
@@ -289,7 +305,7 @@ enum wl1271_cmd_key_type {
/* FIXME: Add description for key-types */
-struct wl1271_cmd_set_keys {
+struct wl1271_cmd_set_sta_keys {
struct wl1271_cmd_header header;
/* Ignored for default WEP key */
@@ -318,6 +334,57 @@ struct wl1271_cmd_set_keys {
__le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
} __packed;
+enum wl1271_cmd_lid_key_type {
+ UNICAST_LID_TYPE = 0,
+ BROADCAST_LID_TYPE = 1,
+ WEP_DEFAULT_LID_TYPE = 2
+};
+
+struct wl1271_cmd_set_ap_keys {
+ struct wl1271_cmd_header header;
+
+ /*
+ * Indicates whether the HLID is a unicast key set
+	 * or a broadcast key set. A special value 0xFF is
+	 * used to indicate that the HLID is on WEP-default
+	 * (multi-hlids); see enum wl1271_cmd_lid_key_type.
+ */
+ u8 hlid;
+
+ /*
+ * In WEP-default network (hlid == 0xFF) used to
+	 * In a WEP-default network (hlid == 0xFF), used to
+	 * indicate which network role (STA/IBSS/AP) should be
+	 * changed.
+ u8 lid_key_type;
+
+ /*
+ * Key ID - For TKIP and AES key types, this field
+ * indicates the value that should be inserted into
+ * the KeyID field of frames transmitted using this
+	 * key entry. For broadcast keys the index is used as
+	 * a marker for the TX/RX key.
+ * For WEP default network (HLID=0xFF), this field
+ * indicates the ID of the key to add or remove.
+ */
+ u8 key_id;
+ u8 reserved_1;
+
+ /* key_action_e */
+ __le16 key_action;
+
+ /* key size in bytes */
+ u8 key_size;
+
+ /* key_type_e */
+ u8 key_type;
+
+ /* This field holds the security key data to add to the STA table */
+ u8 key[MAX_KEY_SIZE];
+ __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
+ __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
+} __packed;
+
struct wl1271_cmd_test_header {
u8 id;
u8 padding[3];
@@ -350,6 +417,21 @@ struct wl1271_general_parms_cmd {
u8 padding[3];
} __packed;
+struct wl128x_general_parms_cmd {
+ struct wl1271_cmd_header header;
+
+ struct wl1271_cmd_test_header test;
+
+ struct wl128x_ini_general_params general_params;
+
+ u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
+ u8 sr_sen_n_p;
+ u8 sr_sen_n_p_gain;
+ u8 sr_sen_nrn;
+ u8 sr_sen_prn;
+ u8 padding[3];
+} __packed;
+
struct wl1271_radio_parms_cmd {
struct wl1271_cmd_header header;
@@ -366,6 +448,23 @@ struct wl1271_radio_parms_cmd {
u8 padding3[2];
} __packed;
+struct wl128x_radio_parms_cmd {
+ struct wl1271_cmd_header header;
+
+ struct wl1271_cmd_test_header test;
+
+ /* Static radio parameters */
+ struct wl128x_ini_band_params_2 static_params_2;
+ struct wl128x_ini_band_params_5 static_params_5;
+
+ u8 fem_vendor_and_options;
+
+ /* Dynamic radio parameters */
+ struct wl128x_ini_fem_params_2 dyn_params_2;
+ u8 padding2;
+ struct wl128x_ini_fem_params_5 dyn_params_5;
+} __packed;
+
struct wl1271_ext_radio_parms_cmd {
struct wl1271_cmd_header header;
@@ -412,4 +511,68 @@ struct wl1271_cmd_set_sta_state {
u8 padding[3];
} __packed;
+enum wl1271_ssid_type {
+ SSID_TYPE_PUBLIC = 0,
+ SSID_TYPE_HIDDEN = 1
+};
+
+struct wl1271_cmd_bss_start {
+ struct wl1271_cmd_header header;
+
+ /* wl1271_ssid_type */
+ u8 ssid_type;
+ u8 ssid_len;
+ u8 ssid[IW_ESSID_MAX_SIZE];
+ u8 padding_1[2];
+
+ /* Basic rate set */
+ __le32 basic_rate_set;
+	/* Aging period in seconds */
+ __le16 aging_period;
+
+ /*
+ * This field specifies the time between target beacon
+ * transmission times (TBTTs), in time units (TUs).
+ * Valid values are 1 to 1024.
+ */
+ __le16 beacon_interval;
+ u8 bssid[ETH_ALEN];
+ u8 bss_index;
+ /* Radio band */
+ u8 band;
+ u8 channel;
+ /* The host link id for the AP's global queue */
+ u8 global_hlid;
+ /* The host link id for the AP's broadcast queue */
+ u8 broadcast_hlid;
+ /* DTIM count */
+ u8 dtim_interval;
+ /* Beacon expiry time in ms */
+ u8 beacon_expiry;
+ u8 padding_2[3];
+} __packed;
+
+struct wl1271_cmd_add_sta {
+ struct wl1271_cmd_header header;
+
+ u8 addr[ETH_ALEN];
+ u8 hlid;
+ u8 aid;
+ u8 psd_type[NUM_ACCESS_CATEGORIES_COPY];
+ __le32 supported_rates;
+ u8 bss_index;
+ u8 sp_len;
+ u8 wmm;
+ u8 padding1;
+} __packed;
+
+struct wl1271_cmd_remove_sta {
+ struct wl1271_cmd_header header;
+
+ u8 hlid;
+ u8 reason_opcode;
+ u8 send_deauth_flag;
+ u8 padding1;
+} __packed;
+
#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index a16b3616e43..a4e0acff867 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -496,6 +496,33 @@ struct conf_rx_settings {
CONF_HW_BIT_RATE_2MBPS)
#define CONF_TX_RATE_RETRY_LIMIT 10
+/*
+ * Rates supported for data packets when operating as AP. Note the absence
+ * of the 22Mbps rate. There is a FW limitation of 12 rates, so we must drop
+ * one. The dropped rate is not mandatory under any operating mode.
+ */
+#define CONF_TX_AP_ENABLED_RATES (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
+ CONF_HW_BIT_RATE_6MBPS | CONF_HW_BIT_RATE_9MBPS | \
+ CONF_HW_BIT_RATE_11MBPS | CONF_HW_BIT_RATE_12MBPS | \
+ CONF_HW_BIT_RATE_18MBPS | CONF_HW_BIT_RATE_24MBPS | \
+ CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
+ CONF_HW_BIT_RATE_54MBPS)
+
+/*
+ * Default rates for management traffic when operating in AP mode. This
+ * should be configured according to the basic rate set of the AP
+ */
+#define CONF_TX_AP_DEFAULT_MGMT_RATES (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS)
+
+/*
+ * Default rates when operating as IBSS; use 11b rates.
+ */
+#define CONF_TX_IBSS_DEFAULT_RATES (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
+	CONF_HW_BIT_RATE_11MBPS)
+
struct conf_tx_rate_class {
/*
@@ -636,9 +663,9 @@ struct conf_tx_settings {
/*
* Configuration for rate classes for TX (currently only one
- * rate class supported.)
+ * rate class supported). Used in non-AP mode.
*/
- struct conf_tx_rate_class rc_conf;
+ struct conf_tx_rate_class sta_rc_conf;
/*
* Configuration for access categories for TX rate control.
@@ -647,6 +674,36 @@ struct conf_tx_settings {
struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
/*
+ * Configuration for rate classes in AP-mode. These rate classes
+ * are for the AC TX queues
+ */
+ struct conf_tx_rate_class ap_rc_conf[CONF_TX_MAX_AC_COUNT];
+
+ /*
+ * Management TX rate class for AP-mode.
+ */
+ struct conf_tx_rate_class ap_mgmt_conf;
+
+ /*
+ * Broadcast TX rate class for AP-mode.
+ */
+ struct conf_tx_rate_class ap_bcst_conf;
+
+ /*
+ * Allow this number of TX retries to a connected station/AP before an
+ * event is triggered from FW.
+ * In AP-mode the hlids of unreachable stations are given in the
+ * "sta_tx_retry_exceeded" member in the event mailbox.
+ */
+ u8 max_tx_retries;
+
+ /*
+ * AP-mode - after this number of seconds a connected station is
+ * considered inactive.
+ */
+ u16 ap_aging_period;
+
+ /*
* Configuration for TID parameters.
*/
u8 tid_conf_count;
@@ -687,6 +744,12 @@ struct conf_tx_settings {
* Range: CONF_HW_BIT_RATE_* bit mask
*/
u32 basic_rate_5;
+
+ /*
+ * TX retry limits for templates
+ */
+ u8 tmpl_short_retry_limit;
+ u8 tmpl_long_retry_limit;
};
enum {
@@ -912,6 +975,14 @@ struct conf_conn_settings {
u8 psm_entry_retries;
/*
+ * Specifies the maximum number of times to try PSM exit if it fails
+ * (if sending the appropriate null-func message fails.)
+ *
+ * Range 0 - 255
+ */
+ u8 psm_exit_retries;
+
+ /*
* Specifies the maximum number of times to try transmit the PSM entry
* null-func frame for each PSM entry attempt
*
@@ -948,7 +1019,9 @@ enum {
CONF_REF_CLK_19_2_E,
CONF_REF_CLK_26_E,
CONF_REF_CLK_38_4_E,
- CONF_REF_CLK_52_E
+ CONF_REF_CLK_52_E,
+ CONF_REF_CLK_38_4_M_XTAL,
+ CONF_REF_CLK_26_M_XTAL,
};
enum single_dual_band_enum {
@@ -962,15 +1035,6 @@ enum single_dual_band_enum {
#define CONF_NUMBER_OF_CHANNELS_2_4 14
#define CONF_NUMBER_OF_CHANNELS_5 35
-struct conf_radio_parms {
- /*
- * FEM parameter set to use
- *
- * Range: 0 or 1
- */
- u8 fem;
-};
-
struct conf_itrim_settings {
/* enable dco itrim */
u8 enable;
@@ -1036,30 +1100,30 @@ struct conf_scan_settings {
/*
* The minimum time to wait on each channel for active scans
*
- * Range: 0 - 65536 tu
+ * Range: u32 tu/1000
*/
- u16 min_dwell_time_active;
+ u32 min_dwell_time_active;
/*
* The maximum time to wait on each channel for active scans
*
- * Range: 0 - 65536 tu
+ * Range: u32 tu/1000
*/
- u16 max_dwell_time_active;
+ u32 max_dwell_time_active;
/*
- * The maximum time to wait on each channel for passive scans
+ * The minimum time to wait on each channel for passive scans
*
- * Range: 0 - 65536 tu
+ * Range: u32 tu/1000
*/
- u16 min_dwell_time_passive;
+ u32 min_dwell_time_passive;
/*
* The maximum time to wait on each channel for passive scans
*
- * Range: 0 - 65536 tu
+ * Range: u32 tu/1000
*/
- u16 max_dwell_time_passive;
+ u32 max_dwell_time_passive;
/*
* Number of probe requests to transmit on each active scan channel
@@ -1090,6 +1154,64 @@ struct conf_rf_settings {
u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
};
+struct conf_ht_setting {
+ u16 tx_ba_win_size;
+ u16 inactivity_timeout;
+};
+
+struct conf_memory_settings {
+ /* Number of stations supported in IBSS mode */
+ u8 num_stations;
+
+ /* Number of ssid profiles used in IBSS mode */
+ u8 ssid_profiles;
+
+ /* Number of memory buffers allocated to rx pool */
+ u8 rx_block_num;
+
+ /* Minimum number of blocks allocated to tx pool */
+ u8 tx_min_block_num;
+
+ /* Disable/Enable dynamic memory */
+ u8 dynamic_memory;
+
+ /*
+	 * Minimum required free tx memory blocks to ensure optimum
+	 * performance
+ *
+ * Range: 0-120
+ */
+ u8 min_req_tx_blocks;
+
+ /*
+	 * Minimum required free rx memory blocks to ensure optimum
+	 * performance
+ *
+ * Range: 0-120
+ */
+ u8 min_req_rx_blocks;
+
+ /*
+ * Minimum number of mem blocks (free+used) guaranteed for TX
+ *
+ * Range: 0-120
+ */
+ u8 tx_min;
+};
+
+struct conf_fm_coex {
+ u8 enable;
+ u8 swallow_period;
+ u8 n_divider_fref_set_1;
+ u8 n_divider_fref_set_2;
+ u16 m_divider_fref_set_1;
+ u16 m_divider_fref_set_2;
+ u32 coex_pll_stabilization_time;
+ u16 ldo_stabilization_time;
+ u8 fm_disturbed_band_margin;
+ u8 swallow_clk_diff;
+};
+
struct conf_drv_settings {
struct conf_sg_settings sg;
struct conf_rx_settings rx;
@@ -1100,6 +1222,11 @@ struct conf_drv_settings {
struct conf_roam_trigger_settings roam_trigger;
struct conf_scan_settings scan;
struct conf_rf_settings rf;
+ struct conf_ht_setting ht;
+ struct conf_memory_settings mem_wl127x;
+ struct conf_memory_settings mem_wl128x;
+ struct conf_fm_coex fm_coex;
+ u8 hci_io_ds;
};
#endif
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index ec607776015..b17cff6cd75 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -99,7 +99,7 @@ static void wl1271_debugfs_update_stats(struct wl1271 *wl)
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -261,27 +261,25 @@ static ssize_t gpio_power_write(struct file *file,
unsigned long value;
int ret;
- mutex_lock(&wl->mutex);
-
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len)) {
- ret = -EFAULT;
- goto out;
+ return -EFAULT;
}
buf[len] = '\0';
- ret = strict_strtoul(buf, 0, &value);
+ ret = kstrtoul(buf, 0, &value);
if (ret < 0) {
wl1271_warning("illegal value in gpio_power");
- goto out;
+ return -EINVAL;
}
+ mutex_lock(&wl->mutex);
+
if (value)
wl1271_power_on(wl);
else
wl1271_power_off(wl);
-out:
mutex_unlock(&wl->mutex);
return count;
}
@@ -293,12 +291,143 @@ static const struct file_operations gpio_power_ops = {
.llseek = default_llseek,
};
-static int wl1271_debugfs_add_files(struct wl1271 *wl)
+static ssize_t dtim_interval_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ u8 value;
+
+ if (wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_DTIM ||
+ wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_N_DTIM)
+ value = wl->conf.conn.listen_interval;
+ else
+ value = 0;
+
+ return wl1271_format_buffer(user_buf, count, ppos, "%d\n", value);
+}
+
+static ssize_t dtim_interval_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ char buf[10];
+ size_t len;
+ unsigned long value;
+ int ret;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value for dtim_interval");
+ return -EINVAL;
+ }
+
+ if (value < 1 || value > 10) {
+ wl1271_warning("dtim value is not in valid range");
+ return -ERANGE;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ wl->conf.conn.listen_interval = value;
+ /* for some reason there are different event types for 1 and >1 */
+ if (value == 1)
+ wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_DTIM;
+ else
+ wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM;
+
+ /*
+ * we don't reconfigure ACX_WAKE_UP_CONDITIONS now, so it will only
+	 * take effect the next time we enter psm.
+ */
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations dtim_interval_ops = {
+ .read = dtim_interval_read,
+ .write = dtim_interval_write,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t beacon_interval_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ u8 value;
+
+ if (wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_BEACON ||
+ wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_N_BEACONS)
+ value = wl->conf.conn.listen_interval;
+ else
+ value = 0;
+
+ return wl1271_format_buffer(user_buf, count, ppos, "%d\n", value);
+}
+
+static ssize_t beacon_interval_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ char buf[10];
+ size_t len;
+ unsigned long value;
+ int ret;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value for beacon_interval");
+ return -EINVAL;
+ }
+
+ if (value < 1 || value > 255) {
+ wl1271_warning("beacon interval value is not in valid range");
+ return -ERANGE;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ wl->conf.conn.listen_interval = value;
+ /* for some reason there are different event types for 1 and >1 */
+ if (value == 1)
+ wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_BEACON;
+ else
+ wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_N_BEACONS;
+
+ /*
+ * we don't reconfigure ACX_WAKE_UP_CONDITIONS now, so it will only
+	 * take effect the next time we enter psm.
+ */
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations beacon_interval_ops = {
+ .read = beacon_interval_read,
+ .write = beacon_interval_write,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static int wl1271_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir)
{
int ret = 0;
struct dentry *entry, *stats;
- stats = debugfs_create_dir("fw-statistics", wl->rootdir);
+ stats = debugfs_create_dir("fw-statistics", rootdir);
if (!stats || IS_ERR(stats)) {
entry = stats;
goto err;
@@ -395,16 +524,13 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl)
DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
- DEBUGFS_ADD(tx_queue_len, wl->rootdir);
- DEBUGFS_ADD(retry_count, wl->rootdir);
- DEBUGFS_ADD(excessive_retries, wl->rootdir);
+ DEBUGFS_ADD(tx_queue_len, rootdir);
+ DEBUGFS_ADD(retry_count, rootdir);
+ DEBUGFS_ADD(excessive_retries, rootdir);
- DEBUGFS_ADD(gpio_power, wl->rootdir);
-
- entry = debugfs_create_x32("debug_level", 0600, wl->rootdir,
- &wl12xx_debug_level);
- if (!entry || IS_ERR(entry))
- goto err;
+ DEBUGFS_ADD(gpio_power, rootdir);
+ DEBUGFS_ADD(dtim_interval, rootdir);
+ DEBUGFS_ADD(beacon_interval, rootdir);
return 0;
@@ -419,7 +545,7 @@ err:
void wl1271_debugfs_reset(struct wl1271 *wl)
{
- if (!wl->rootdir)
+ if (!wl->stats.fw_stats)
return;
memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
@@ -430,13 +556,13 @@ void wl1271_debugfs_reset(struct wl1271 *wl)
int wl1271_debugfs_init(struct wl1271 *wl)
{
int ret;
+ struct dentry *rootdir;
- wl->rootdir = debugfs_create_dir(KBUILD_MODNAME,
- wl->hw->wiphy->debugfsdir);
+ rootdir = debugfs_create_dir(KBUILD_MODNAME,
+ wl->hw->wiphy->debugfsdir);
- if (IS_ERR(wl->rootdir)) {
- ret = PTR_ERR(wl->rootdir);
- wl->rootdir = NULL;
+ if (IS_ERR(rootdir)) {
+ ret = PTR_ERR(rootdir);
goto err;
}
@@ -450,7 +576,7 @@ int wl1271_debugfs_init(struct wl1271 *wl)
wl->stats.fw_stats_update = jiffies;
- ret = wl1271_debugfs_add_files(wl);
+ ret = wl1271_debugfs_add_files(wl, rootdir);
if (ret < 0)
goto err_file;
@@ -462,8 +588,7 @@ err_file:
wl->stats.fw_stats = NULL;
err_fw:
- debugfs_remove_recursive(wl->rootdir);
- wl->rootdir = NULL;
+ debugfs_remove_recursive(rootdir);
err:
return ret;
@@ -473,8 +598,4 @@ void wl1271_debugfs_exit(struct wl1271 *wl)
{
kfree(wl->stats.fw_stats);
wl->stats.fw_stats = NULL;
-
- debugfs_remove_recursive(wl->rootdir);
- wl->rootdir = NULL;
-
}
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index f9146f5242f..933817ad6d6 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -33,6 +33,7 @@ void wl1271_pspoll_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
+ int ret;
dwork = container_of(work, struct delayed_work, work);
wl = container_of(dwork, struct wl1271, pspoll_work);
@@ -55,8 +56,13 @@ void wl1271_pspoll_work(struct work_struct *work)
* delivery failure occurred, and no-one changed state since, so
* we should go back to powersave.
*/
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, wl->basic_rate, true);
+ wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
};
@@ -129,26 +135,7 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
/* enable beacon early termination */
ret = wl1271_acx_bet_enable(wl, true);
- if (ret < 0)
- break;
-
- /* go to extremely low power mode */
- wl1271_ps_elp_sleep(wl);
- break;
- case EVENT_EXIT_POWER_SAVE_FAIL:
- wl1271_debug(DEBUG_PSM, "PSM exit failed");
-
- if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
- wl->psm_entry_retry = 0;
- break;
- }
-
- /* make sure the firmware goes to active mode - the frame to
- be sent next will indicate to the AP, that we are active. */
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, false);
break;
- case EVENT_EXIT_POWER_SAVE_SUCCESS:
default:
break;
}
@@ -186,6 +173,9 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
int ret;
u32 vector;
bool beacon_loss = false;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ bool disconnect_sta = false;
+ unsigned long sta_bitmap = 0;
wl1271_event_mbox_dump(mbox);
@@ -218,21 +208,21 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
*
*/
- if (vector & BSS_LOSE_EVENT_ID) {
+ if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
wl1271_info("Beacon loss detected.");
/* indicate to the stack, that beacons have been lost */
beacon_loss = true;
}
- if (vector & PS_REPORT_EVENT_ID) {
+ if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
if (ret < 0)
return ret;
}
- if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
+ if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
wl1271_event_pspoll_delivery_fail(wl);
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
@@ -241,9 +231,62 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
wl1271_event_rssi_trigger(wl, mbox);
}
+ if ((vector & DUMMY_PACKET_EVENT_ID) && !is_ap) {
+		wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_EVENT_ID");
+ if (wl->vif)
+ wl1271_tx_dummy_packet(wl);
+ }
+
+ /*
+ * "TX retries exceeded" has a different meaning according to mode.
+ * In AP mode the offending station is disconnected. In STA mode we
+ * report connection loss.
+ */
+ if (vector & MAX_TX_RETRY_EVENT_ID) {
+ wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID");
+ if (is_ap) {
+ sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded);
+ disconnect_sta = true;
+ } else {
+ beacon_loss = true;
+ }
+ }
+
+ if ((vector & INACTIVE_STA_EVENT_ID) && is_ap) {
+ wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
+ sta_bitmap |= le16_to_cpu(mbox->sta_aging_status);
+ disconnect_sta = true;
+ }
+
if (wl->vif && beacon_loss)
ieee80211_connection_loss(wl->vif);
+ if (is_ap && disconnect_sta) {
+ u32 num_packets = wl->conf.tx.max_tx_retries;
+ struct ieee80211_sta *sta;
+ const u8 *addr;
+ int h;
+
+ for (h = find_first_bit(&sta_bitmap, AP_MAX_LINKS);
+ h < AP_MAX_LINKS;
+ h = find_next_bit(&sta_bitmap, AP_MAX_LINKS, h+1)) {
+ if (!wl1271_is_active_sta(wl, h))
+ continue;
+
+ addr = wl->links[h].addr;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wl->vif, addr);
+ if (sta) {
+ wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
+#if 0
+ ieee80211_report_low_ack(sta, num_packets);
+#endif
+ }
+ rcu_read_unlock();
+ }
+ }
+
return 0;
}
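Editorial note: the AP disconnect loop above walks sta_bitmap with open-coded find_first_bit()/find_next_bit(); the kernel's for_each_set_bit() helper (linux/bitops.h) expresses the same walk more compactly. A hedged sketch of the equivalent loop, same semantics, only the iteration helper differs:

/* Hedged sketch: iterate the hlids flagged by the firmware and skip
 * links that are no longer active. */
static void example_walk_flagged_stations(struct wl1271 *wl,
					  unsigned long sta_bitmap)
{
	int h;

	for_each_set_bit(h, &sta_bitmap, AP_MAX_LINKS) {
		if (!wl1271_is_active_sta(wl, h))
			continue;
		/* look up wl->links[h].addr and report it here */
	}
}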
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index 6cce0143adb..7ae5a082124 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -58,9 +58,16 @@ enum {
CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17),
BSS_LOSE_EVENT_ID = BIT(18),
REGAINED_BSS_EVENT_ID = BIT(19),
- ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20),
+ MAX_TX_RETRY_EVENT_ID = BIT(20),
+ /* STA: dummy packet for dynamic mem blocks */
+ DUMMY_PACKET_EVENT_ID = BIT(21),
+ /* AP: STA remove complete */
+ STA_REMOVE_COMPLETE_EVENT_ID = BIT(21),
SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
+ /* STA: SG prediction */
SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23),
+ /* AP: Inactive STA */
+ INACTIVE_STA_EVENT_ID = BIT(23),
SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25),
DBG_EVENT_ID = BIT(26),
@@ -74,8 +81,6 @@ enum {
enum {
EVENT_ENTER_POWER_SAVE_FAIL = 0,
EVENT_ENTER_POWER_SAVE_SUCCESS,
- EVENT_EXIT_POWER_SAVE_FAIL,
- EVENT_EXIT_POWER_SAVE_SUCCESS,
};
struct event_debug_report {
@@ -115,7 +120,16 @@ struct event_mailbox {
u8 scheduled_scan_status;
u8 ps_status;
- u8 reserved_5[29];
+ /* AP FW only */
+ u8 hlid_removed;
+
+ /* a bitmap of hlids for stations that have been inactive too long */
+ __le16 sta_aging_status;
+
+ /* a bitmap of hlids for stations which didn't respond to TX */
+ __le16 sta_tx_retry_exceeded;
+
+ u8 reserved_5[24];
} __packed;
int wl1271_event_unmask(struct wl1271 *wl);
@@ -123,4 +137,7 @@ void wl1271_event_mbox_config(struct wl1271 *wl);
int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
void wl1271_pspoll_work(struct work_struct *work);
+/* Functions from main.c */
+bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid);
+
#endif
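Editorial note: the AP and STA firmwares reuse the same mailbox bits (BIT(21) and BIT(23) above each carry two meanings), so the event handler must qualify every test with the current BSS type. A hedged sketch of that pattern, using only IDs defined in this header:

/* Hedged sketch: the same vector bit is interpreted per firmware type. */
static void example_decode_bit23(u32 vector, bool is_ap)
{
	if ((vector & SOFT_GEMINI_PREDICTION_EVENT_ID) && !is_ap) {
		/* STA firmware: Soft Gemini prediction event */
	}

	if ((vector & INACTIVE_STA_EVENT_ID) && is_ap) {
		/* AP firmware: one or more stations aged out */
	}
}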
diff --git a/drivers/net/wireless/wl12xx/ini.h b/drivers/net/wireless/wl12xx/ini.h
index c330a2583df..1420c842b8f 100644
--- a/drivers/net/wireless/wl12xx/ini.h
+++ b/drivers/net/wireless/wl12xx/ini.h
@@ -41,6 +41,28 @@ struct wl1271_ini_general_params {
u8 srf3[WL1271_INI_MAX_SMART_REFLEX_PARAM];
} __packed;
+#define WL128X_INI_MAX_SETTINGS_PARAM 4
+
+struct wl128x_ini_general_params {
+ u8 ref_clock;
+ u8 settling_time;
+ u8 clk_valid_on_wakeup;
+ u8 tcxo_ref_clock;
+ u8 tcxo_settling_time;
+ u8 tcxo_valid_on_wakeup;
+ u8 tcxo_ldo_voltage;
+ u8 xtal_itrim_val;
+ u8 platform_conf;
+ u8 dual_mode_select;
+ u8 tx_bip_fem_auto_detect;
+ u8 tx_bip_fem_manufacturer;
+ u8 general_settings[WL128X_INI_MAX_SETTINGS_PARAM];
+ u8 sr_state;
+ u8 srf1[WL1271_INI_MAX_SMART_REFLEX_PARAM];
+ u8 srf2[WL1271_INI_MAX_SMART_REFLEX_PARAM];
+ u8 srf3[WL1271_INI_MAX_SMART_REFLEX_PARAM];
+} __packed;
+
#define WL1271_INI_RSSI_PROCESS_COMPENS_SIZE 15
struct wl1271_ini_band_params_2 {
@@ -49,9 +71,16 @@ struct wl1271_ini_band_params_2 {
u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
} __packed;
-#define WL1271_INI_RATE_GROUP_COUNT 6
#define WL1271_INI_CHANNEL_COUNT_2 14
+struct wl128x_ini_band_params_2 {
+ u8 rx_trace_insertion_loss;
+ u8 tx_trace_loss[WL1271_INI_CHANNEL_COUNT_2];
+ u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
+} __packed;
+
+#define WL1271_INI_RATE_GROUP_COUNT 6
+
struct wl1271_ini_fem_params_2 {
__le16 tx_bip_ref_pd_voltage;
u8 tx_bip_ref_power;
@@ -68,6 +97,28 @@ struct wl1271_ini_fem_params_2 {
u8 normal_to_degraded_high_thr;
} __packed;
+#define WL128X_INI_RATE_GROUP_COUNT 7
+/* low and high temperatures */
+#define WL128X_INI_PD_VS_TEMPERATURE_RANGES 2
+
+struct wl128x_ini_fem_params_2 {
+ __le16 tx_bip_ref_pd_voltage;
+ u8 tx_bip_ref_power;
+ u8 tx_bip_ref_offset;
+ u8 tx_per_rate_pwr_limits_normal[WL128X_INI_RATE_GROUP_COUNT];
+ u8 tx_per_rate_pwr_limits_degraded[WL128X_INI_RATE_GROUP_COUNT];
+ u8 tx_per_rate_pwr_limits_extreme[WL128X_INI_RATE_GROUP_COUNT];
+ u8 tx_per_chan_pwr_limits_11b[WL1271_INI_CHANNEL_COUNT_2];
+ u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_2];
+ u8 tx_pd_vs_rate_offsets[WL128X_INI_RATE_GROUP_COUNT];
+ u8 tx_ibias[WL128X_INI_RATE_GROUP_COUNT + 1];
+ u8 tx_pd_vs_chan_offsets[WL1271_INI_CHANNEL_COUNT_2];
+ u8 tx_pd_vs_temperature[WL128X_INI_PD_VS_TEMPERATURE_RANGES];
+ u8 rx_fem_insertion_loss;
+ u8 degraded_low_to_normal_thr;
+ u8 normal_to_degraded_high_thr;
+} __packed;
+
#define WL1271_INI_CHANNEL_COUNT_5 35
#define WL1271_INI_SUB_BAND_COUNT_5 7
@@ -77,6 +128,12 @@ struct wl1271_ini_band_params_5 {
u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
} __packed;
+struct wl128x_ini_band_params_5 {
+ u8 rx_trace_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5];
+ u8 tx_trace_loss[WL1271_INI_CHANNEL_COUNT_5];
+ u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
+} __packed;
+
struct wl1271_ini_fem_params_5 {
__le16 tx_bip_ref_pd_voltage[WL1271_INI_SUB_BAND_COUNT_5];
u8 tx_bip_ref_power[WL1271_INI_SUB_BAND_COUNT_5];
@@ -92,6 +149,23 @@ struct wl1271_ini_fem_params_5 {
u8 normal_to_degraded_high_thr;
} __packed;
+struct wl128x_ini_fem_params_5 {
+ __le16 tx_bip_ref_pd_voltage[WL1271_INI_SUB_BAND_COUNT_5];
+ u8 tx_bip_ref_power[WL1271_INI_SUB_BAND_COUNT_5];
+ u8 tx_bip_ref_offset[WL1271_INI_SUB_BAND_COUNT_5];
+ u8 tx_per_rate_pwr_limits_normal[WL128X_INI_RATE_GROUP_COUNT];
+ u8 tx_per_rate_pwr_limits_degraded[WL128X_INI_RATE_GROUP_COUNT];
+ u8 tx_per_rate_pwr_limits_extreme[WL128X_INI_RATE_GROUP_COUNT];
+ u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_5];
+ u8 tx_pd_vs_rate_offsets[WL128X_INI_RATE_GROUP_COUNT];
+ u8 tx_ibias[WL128X_INI_RATE_GROUP_COUNT];
+ u8 tx_pd_vs_chan_offsets[WL1271_INI_CHANNEL_COUNT_5];
+ u8 tx_pd_vs_temperature[WL1271_INI_SUB_BAND_COUNT_5 *
+ WL128X_INI_PD_VS_TEMPERATURE_RANGES];
+ u8 rx_fem_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5];
+ u8 degraded_low_to_normal_thr;
+ u8 normal_to_degraded_high_thr;
+} __packed;
/* NVS data structure */
#define WL1271_INI_NVS_SECTION_SIZE 468
@@ -100,7 +174,7 @@ struct wl1271_ini_fem_params_5 {
#define WL1271_INI_LEGACY_NVS_FILE_SIZE 800
struct wl1271_nvs_file {
- /* NVS section */
+ /* NVS section - must be first! */
u8 nvs[WL1271_INI_NVS_SECTION_SIZE];
/* INI section */
@@ -120,4 +194,24 @@ struct wl1271_nvs_file {
} dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT];
} __packed;
+struct wl128x_nvs_file {
+ /* NVS section - must be first! */
+ u8 nvs[WL1271_INI_NVS_SECTION_SIZE];
+
+ /* INI section */
+ struct wl128x_ini_general_params general_params;
+ u8 fem_vendor_and_options;
+ struct wl128x_ini_band_params_2 stat_radio_params_2;
+ u8 padding2;
+ struct {
+ struct wl128x_ini_fem_params_2 params;
+ u8 padding;
+ } dyn_radio_params_2[WL1271_INI_FEM_MODULE_COUNT];
+ struct wl128x_ini_band_params_5 stat_radio_params_5;
+ u8 padding3;
+ struct {
+ struct wl128x_ini_fem_params_5 params;
+ u8 padding;
+ } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT];
+} __packed;
#endif
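Editorial note: the wl128x NVS layout above differs in size from the wl1271 one, so a loader can tell the two apart by comparing the blob length against the packed structures. A hedged sketch of such a check (the driver's actual validation is not part of this hunk):

/* Hedged sketch: choose the NVS parser by blob size. */
static bool example_nvs_is_wl128x(size_t nvs_len)
{
	if (nvs_len == sizeof(struct wl128x_nvs_file))
		return true;

	/* anything else is treated as the wl1271 layout here */
	return false;
}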
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index 785a5304bfc..46fd7212b23 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -30,27 +30,10 @@
#include "acx.h"
#include "cmd.h"
#include "reg.h"
+#include "tx.h"
+#include "io.h"
-static int wl1271_init_hwenc_config(struct wl1271 *wl)
-{
- int ret;
-
- ret = wl1271_acx_feature_cfg(wl);
- if (ret < 0) {
- wl1271_warning("couldn't set feature config");
- return ret;
- }
-
- ret = wl1271_cmd_set_default_wep_key(wl, wl->default_key);
- if (ret < 0) {
- wl1271_warning("couldn't set default key");
- return ret;
- }
-
- return 0;
-}
-
-int wl1271_init_templates_config(struct wl1271 *wl)
+int wl1271_sta_init_templates_config(struct wl1271 *wl)
{
int ret, i;
@@ -118,6 +101,132 @@ int wl1271_init_templates_config(struct wl1271 *wl)
return 0;
}
+static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
+{
+ struct wl12xx_disconn_template *tmpl;
+ int ret;
+
+ tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+ if (!tmpl) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_DEAUTH);
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
+ tmpl, sizeof(*tmpl), 0,
+ wl1271_tx_min_rate_get(wl));
+
+out:
+ kfree(tmpl);
+ return ret;
+}
+
+static int wl1271_ap_init_null_template(struct wl1271 *wl)
+{
+ struct ieee80211_hdr_3addr *nullfunc;
+ int ret;
+
+ nullfunc = kzalloc(sizeof(*nullfunc), GFP_KERNEL);
+ if (!nullfunc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+
+ /* nullfunc->addr1 is filled by FW */
+
+ memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
+ sizeof(*nullfunc), 0,
+ wl1271_tx_min_rate_get(wl));
+
+out:
+ kfree(nullfunc);
+ return ret;
+}
+
+static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
+{
+ struct ieee80211_qos_hdr *qosnull;
+ int ret;
+
+ qosnull = kzalloc(sizeof(*qosnull), GFP_KERNEL);
+ if (!qosnull) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ qosnull->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+
+ /* qosnull->addr1 is filled by FW */
+
+ memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
+ sizeof(*qosnull), 0,
+ wl1271_tx_min_rate_get(wl));
+
+out:
+ kfree(qosnull);
+ return ret;
+}
+
+static int wl1271_ap_init_templates_config(struct wl1271 *wl)
+{
+ int ret;
+
+ /*
+ * Put maximum-size empty placeholders for all templates. These
+ * reserve firmware memory for the real frames set later.
+ */
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
+ sizeof
+ (struct wl12xx_probe_resp_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
+ sizeof
+ (struct wl12xx_beacon_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
+ sizeof
+ (struct wl12xx_disconn_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
+ sizeof(struct wl12xx_null_data_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
+ sizeof
+ (struct wl12xx_qos_null_data_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
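Editorial note: the helper above only reserves firmware memory: every template is set with a NULL buffer and its maximum size, and the real frames are written later from wl1271_ap_hw_init_post_mem(). A hedged sketch of that reserve-then-fill pattern for a single template:

/* Hedged sketch: reserve the slot first, fill it once the frame exists. */
static int example_deauth_template(struct wl1271 *wl,
				   struct wl12xx_disconn_template *tmpl)
{
	int ret;

	/* 1) placeholder: NULL buffer, maximum size, automatic rate */
	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
				      sizeof(*tmpl), 0,
				      WL1271_RATE_AUTOMATIC);
	if (ret < 0)
		return ret;

	/* 2) real contents, written over the reserved slot */
	return wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, tmpl,
				       sizeof(*tmpl), 0,
				       wl1271_tx_min_rate_get(wl));
}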
+
static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
{
int ret;
@@ -145,10 +254,6 @@ int wl1271_init_phy_config(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
- if (ret < 0)
- return ret;
-
ret = wl1271_acx_service_period_timeout(wl);
if (ret < 0)
return ret;
@@ -213,26 +318,257 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
return 0;
}
+static int wl1271_sta_hw_init(struct wl1271 *wl)
+{
+ int ret;
+
+ if (wl->chip.id != CHIP_ID_1283_PG20) {
+ ret = wl1271_cmd_ext_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* PS config */
+ ret = wl1271_acx_config_ps(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_sta_init_templates_config(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize connection monitoring thresholds */
+ ret = wl1271_acx_conn_monit_params(wl, false);
+ if (ret < 0)
+ return ret;
+
+ /* Beacon filtering */
+ ret = wl1271_init_beacon_filter(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Bluetooth WLAN coexistence */
+ ret = wl1271_init_pta(wl);
+ if (ret < 0)
+ return ret;
+
+ /* FM WLAN coexistence */
+ ret = wl1271_acx_fm_coex(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Beacons and broadcast settings */
+ ret = wl1271_init_beacon_broadcast(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Configure for ELP power saving */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+ if (ret < 0)
+ return ret;
+
+ /* Configure rssi/snr averaging weights */
+ ret = wl1271_acx_rssi_snr_avg_weights(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_sta_rate_policies(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_sta_max_tx_retry(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_sta_mem_cfg(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
+{
+ int ret, i;
+
+ ret = wl1271_cmd_set_sta_default_wep_key(wl, wl->default_key);
+ if (ret < 0) {
+ wl1271_warning("couldn't set default key");
+ return ret;
+ }
+
+ /* disable all keep-alive templates */
+ for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+ ret = wl1271_acx_keep_alive_config(wl, i,
+ ACX_KEEP_ALIVE_TPL_INVALID);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* disable the keep-alive feature */
+ ret = wl1271_acx_keep_alive_mode(wl, false);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wl1271_ap_hw_init(struct wl1271 *wl)
+{
+ int ret, i;
+
+ ret = wl1271_ap_init_templates_config(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Configure for power always on */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ return ret;
+
+ /* Configure initial TX rate classes */
+ for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
+ ret = wl1271_acx_ap_rate_policy(wl,
+ &wl->conf.tx.ap_rc_conf[i], i);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = wl1271_acx_ap_rate_policy(wl,
+ &wl->conf.tx.ap_mgmt_conf,
+ ACX_TX_AP_MODE_MGMT_RATE);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_ap_rate_policy(wl,
+ &wl->conf.tx.ap_bcst_conf,
+ ACX_TX_AP_MODE_BCST_RATE);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_ap_max_tx_retry(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_ap_mem_cfg(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wl1271_ap_init_deauth_template(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_ap_init_null_template(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_ap_init_qos_null_template(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void wl1271_check_ba_support(struct wl1271 *wl)
+{
+ /* validate FW code version x.x.x.50-60.x */
+ if ((wl->chip.fw_ver[3] >= WL12XX_BA_SUPPORT_FW_COST_VER2_START) &&
+ (wl->chip.fw_ver[3] < WL12XX_BA_SUPPORT_FW_COST_VER2_END)) {
+ wl->ba_support = true;
+ return;
+ }
+
+ wl->ba_support = false;
+}
+
+static int wl1271_set_ba_policies(struct wl1271 *wl)
+{
+ u8 tid_index;
+ int ret = 0;
+
+ /* Reset the BA RX indicators */
+ wl->ba_rx_bitmap = 0;
+
+ /* validate that the FW supports BA */
+ wl1271_check_ba_support(wl);
+
+ if (wl->ba_support)
+ /* 802.11n initiator BA session setting */
+ for (tid_index = 0; tid_index < CONF_TX_MAX_TID_COUNT;
+ ++tid_index) {
+ ret = wl1271_acx_set_ba_session(wl, WLAN_BACK_INITIATOR,
+ tid_index, true);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+int wl1271_chip_specific_init(struct wl1271 *wl)
+{
+ int ret = 0;
+
+ if (wl->chip.id == CHIP_ID_1283_PG20) {
+ u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
+
+ if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
+ /* Enable SDIO padding */
+ host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
+
+ /* Must be before wl1271_acx_init_mem_config() */
+ ret = wl1271_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ return ret;
+}
+
+
int wl1271_hw_init(struct wl1271 *wl)
{
struct conf_tx_ac_category *conf_ac;
struct conf_tx_tid *conf_tid;
int ret, i;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
- ret = wl1271_cmd_general_parms(wl);
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ ret = wl128x_cmd_general_parms(wl);
+ else
+ ret = wl1271_cmd_general_parms(wl);
if (ret < 0)
return ret;
- ret = wl1271_cmd_radio_parms(wl);
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ ret = wl128x_cmd_radio_parms(wl);
+ else
+ ret = wl1271_cmd_radio_parms(wl);
if (ret < 0)
return ret;
- ret = wl1271_cmd_ext_radio_parms(wl);
+ /* Chip-specific init */
+ ret = wl1271_chip_specific_init(wl);
if (ret < 0)
return ret;
- /* Template settings */
- ret = wl1271_init_templates_config(wl);
+ /* Mode specific init */
+ if (is_ap)
+ ret = wl1271_ap_hw_init(wl);
+ else
+ ret = wl1271_sta_hw_init(wl);
+
if (ret < 0)
return ret;
@@ -259,16 +595,6 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl, false);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Beacon filtering */
- ret = wl1271_init_beacon_filter(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Configure TX patch complete interrupt behavior */
ret = wl1271_acx_tx_config_options(wl);
if (ret < 0)
@@ -279,21 +605,11 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Bluetooth WLAN coexistence */
- ret = wl1271_init_pta(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Energy detection */
ret = wl1271_init_energy_detection(wl);
if (ret < 0)
goto out_free_memmap;
- /* Beacons and boradcast settings */
- ret = wl1271_init_beacon_broadcast(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Default fragmentation threshold */
ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
if (ret < 0)
@@ -321,23 +637,13 @@ int wl1271_hw_init(struct wl1271 *wl)
goto out_free_memmap;
}
- /* Configure TX rate classes */
- ret = wl1271_acx_rate_policies(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Enable data path */
ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
goto out_free_memmap;
- /* Configure for ELP power saving */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
- if (ret < 0)
- goto out_free_memmap;
-
/* Configure HW encryption */
- ret = wl1271_init_hwenc_config(wl);
+ ret = wl1271_acx_feature_cfg(wl);
if (ret < 0)
goto out_free_memmap;
@@ -346,21 +652,17 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* disable all keep-alive templates */
- for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
- ret = wl1271_acx_keep_alive_config(wl, i,
- ACX_KEEP_ALIVE_TPL_INVALID);
- if (ret < 0)
- goto out_free_memmap;
- }
+ /* Mode specific init - post mem init */
+ if (is_ap)
+ ret = wl1271_ap_hw_init_post_mem(wl);
+ else
+ ret = wl1271_sta_hw_init_post_mem(wl);
- /* disable the keep-alive feature */
- ret = wl1271_acx_keep_alive_mode(wl, false);
if (ret < 0)
goto out_free_memmap;
- /* Configure rssi/snr averaging weights */
- ret = wl1271_acx_rssi_snr_avg_weights(wl);
+ /* Configure initiator BA sessions policies */
+ ret = wl1271_set_ba_policies(wl);
if (ret < 0)
goto out_free_memmap;
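Editorial note: after this change wl1271_hw_init() keeps only the configuration common to both firmwares and branches into the AP- or STA-specific helpers twice, once before the memory map is read and once after. A hedged sketch of that shape (error handling elided):

/* Hedged sketch of the mode split introduced above. */
static int example_mode_split_init(struct wl1271 *wl)
{
	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
	int ret;

	ret = is_ap ? wl1271_ap_hw_init(wl) : wl1271_sta_hw_init(wl);
	if (ret < 0)
		return ret;

	/* ... common ACX and memory configuration runs here ... */

	return is_ap ? wl1271_ap_hw_init_post_mem(wl) :
		       wl1271_sta_hw_init_post_mem(wl);
}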
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h
index 7762421f860..4975270a91a 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -27,10 +27,11 @@
#include "wl12xx.h"
int wl1271_hw_init_power_auth(struct wl1271 *wl);
-int wl1271_init_templates_config(struct wl1271 *wl);
+int wl1271_sta_init_templates_config(struct wl1271 *wl);
int wl1271_init_phy_config(struct wl1271 *wl);
int wl1271_init_pta(struct wl1271 *wl);
int wl1271_init_energy_detection(struct wl1271 *wl);
+int wl1271_chip_specific_init(struct wl1271 *wl);
int wl1271_hw_init(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/wl12xx/io.c b/drivers/net/wireless/wl12xx/io.c
index d557f73e7c1..da5c1ad942a 100644
--- a/drivers/net/wireless/wl12xx/io.c
+++ b/drivers/net/wireless/wl12xx/io.c
@@ -29,6 +29,7 @@
#include "wl12xx.h"
#include "wl12xx_80211.h"
#include "io.h"
+#include "tx.h"
#define OCP_CMD_LOOP 32
@@ -43,6 +44,16 @@
#define OCP_STATUS_REQ_FAILED 0x20000
#define OCP_STATUS_RESP_ERROR 0x30000
+bool wl1271_set_block_size(struct wl1271 *wl)
+{
+ if (wl->if_ops->set_block_size) {
+ wl->if_ops->set_block_size(wl, WL12XX_BUS_BLOCK_SIZE);
+ return true;
+ }
+
+ return false;
+}
+
void wl1271_disable_interrupts(struct wl1271 *wl)
{
wl->if_ops->disable_irq(wl);
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index 844b32b170b..36e185583ec 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -168,5 +168,9 @@ void wl1271_unregister_hw(struct wl1271 *wl);
int wl1271_init_ieee80211(struct wl1271 *wl);
struct ieee80211_hw *wl1271_alloc_hw(void);
int wl1271_free_hw(struct wl1271 *wl);
+irqreturn_t wl1271_irq(int irq, void *data);
+bool wl1271_set_block_size(struct wl1271 *wl);
+int wl1271_tx_dummy_packet(struct wl1271 *wl);
+void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters);
#endif
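Editorial note: wl1271_set_block_size() only succeeds when the bus glue fills in the new set_block_size hook. A hedged sketch of how an SDIO implementation could back it; the SDIO glue is not part of this section, and wl_to_func() is a hypothetical accessor:

#include <linux/mmc/sdio_func.h>

/* Hedged sketch: back the hook with the MMC core's block-size setter. */
static void example_sdio_set_block_size(struct wl1271 *wl, unsigned int blksz)
{
	struct sdio_func *func = wl_to_func(wl);	/* hypothetical */

	sdio_claim_host(func);
	sdio_set_block_size(func, blksz);		/* return value ignored in this sketch */
	sdio_release_host(func);
}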
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 062247ef3ad..a8eaa54158c 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -30,6 +30,7 @@
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/wl12xx.h>
#include "wl12xx.h"
#include "wl12xx_80211.h"
@@ -54,7 +55,7 @@ static struct conf_drv_settings default_conf = {
[CONF_SG_BT_PER_THRESHOLD] = 7500,
[CONF_SG_HV3_MAX_OVERRIDE] = 0,
[CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400,
- [CONF_SG_BT_LOAD_RATIO] = 50,
+ [CONF_SG_BT_LOAD_RATIO] = 200,
[CONF_SG_AUTO_PS_MODE] = 1,
[CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
@@ -116,11 +117,11 @@ static struct conf_drv_settings default_conf = {
},
.tx = {
.tx_energy_detection = 0,
- .rc_conf = {
+ .sta_rc_conf = {
.enabled_rates = 0,
.short_retry_limit = 10,
.long_retry_limit = 10,
- .aflags = 0
+ .aflags = 0,
},
.ac_conf_count = 4,
.ac_conf = {
@@ -153,6 +154,46 @@ static struct conf_drv_settings default_conf = {
.tx_op_limit = 1504,
},
},
+ .ap_rc_conf = {
+ [0] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ [1] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ [2] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ [3] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ },
+ .ap_mgmt_conf = {
+ .enabled_rates = CONF_TX_AP_DEFAULT_MGMT_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ .ap_bcst_conf = {
+ .enabled_rates = CONF_HW_BIT_RATE_1MBPS,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ .max_tx_retries = 100,
+ .ap_aging_period = 300,
.tid_conf_count = 4,
.tid_conf = {
[CONF_TX_AC_BE] = {
@@ -193,6 +234,8 @@ static struct conf_drv_settings default_conf = {
.tx_compl_threshold = 4,
.basic_rate = CONF_HW_BIT_RATE_1MBPS,
.basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
+ .tmpl_short_retry_limit = 10,
+ .tmpl_long_retry_limit = 10,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -213,8 +256,9 @@ static struct conf_drv_settings default_conf = {
.ps_poll_threshold = 10,
.ps_poll_recovery_period = 700,
.bet_enable = CONF_BET_MODE_ENABLE,
- .bet_max_consecutive = 10,
+ .bet_max_consecutive = 50,
.psm_entry_retries = 5,
+ .psm_exit_retries = 16,
.psm_entry_nullfunc_retries = 3,
.psm_entry_hangover_period = 1,
.keep_alive_interval = 55000,
@@ -233,13 +277,13 @@ static struct conf_drv_settings default_conf = {
.avg_weight_rssi_beacon = 20,
.avg_weight_rssi_data = 10,
.avg_weight_snr_beacon = 20,
- .avg_weight_snr_data = 10
+ .avg_weight_snr_data = 10,
},
.scan = {
.min_dwell_time_active = 7500,
.max_dwell_time_active = 30000,
- .min_dwell_time_passive = 30000,
- .max_dwell_time_passive = 60000,
+ .min_dwell_time_passive = 100000,
+ .max_dwell_time_passive = 100000,
.num_probe_reqs = 2,
},
.rf = {
@@ -252,9 +296,47 @@ static struct conf_drv_settings default_conf = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
},
+ .ht = {
+ .tx_ba_win_size = 64,
+ .inactivity_timeout = 10000,
+ },
+ .mem_wl127x = {
+ .num_stations = 1,
+ .ssid_profiles = 1,
+ .rx_block_num = 70,
+ .tx_min_block_num = 40,
+ .dynamic_memory = 1,
+ .min_req_tx_blocks = 100,
+ .min_req_rx_blocks = 22,
+ .tx_min = 27,
+ },
+ .mem_wl128x = {
+ .num_stations = 1,
+ .ssid_profiles = 1,
+ .rx_block_num = 40,
+ .tx_min_block_num = 40,
+ .dynamic_memory = 1,
+ .min_req_tx_blocks = 45,
+ .min_req_rx_blocks = 22,
+ .tx_min = 27,
+ },
+ .fm_coex = {
+ .enable = true,
+ .swallow_period = 5,
+ .n_divider_fref_set_1 = 0xff, /* default */
+ .n_divider_fref_set_2 = 12,
+ .m_divider_fref_set_1 = 148,
+ .m_divider_fref_set_2 = 0xffff, /* default */
+ .coex_pll_stabilization_time = 0xffffffff, /* default */
+ .ldo_stabilization_time = 0xffff, /* default */
+ .fm_disturbed_band_margin = 0xff, /* default */
+ .swallow_clk_diff = 0xff, /* default */
+ },
+ .hci_io_ds = HCI_IO_DS_6MA,
};
static void __wl1271_op_remove_interface(struct wl1271 *wl);
+static void wl1271_free_ap_keys(struct wl1271 *wl);
static void wl1271_device_release(struct device *dev)
@@ -272,6 +354,7 @@ static struct platform_device wl1271_device = {
},
};
+static DEFINE_MUTEX(wl_list_mutex);
static LIST_HEAD(wl_list);
static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
@@ -302,10 +385,12 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
return NOTIFY_DONE;
wl_temp = hw->priv;
+ mutex_lock(&wl_list_mutex);
list_for_each_entry(wl, &wl_list, list) {
if (wl == wl_temp)
break;
}
+ mutex_unlock(&wl_list_mutex);
if (wl != wl_temp)
return NOTIFY_DONE;
@@ -317,7 +402,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -381,19 +466,34 @@ static int wl1271_plt_init(struct wl1271 *wl)
struct conf_tx_tid *conf_tid;
int ret, i;
- ret = wl1271_cmd_general_parms(wl);
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ ret = wl128x_cmd_general_parms(wl);
+ else
+ ret = wl1271_cmd_general_parms(wl);
if (ret < 0)
return ret;
- ret = wl1271_cmd_radio_parms(wl);
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ ret = wl128x_cmd_radio_parms(wl);
+ else
+ ret = wl1271_cmd_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+
+ if (wl->chip.id != CHIP_ID_1283_PG20) {
+ ret = wl1271_cmd_ext_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+ }
if (ret < 0)
return ret;
- ret = wl1271_cmd_ext_radio_parms(wl);
+ /* Chip-specific initializations */
+ ret = wl1271_chip_specific_init(wl);
if (ret < 0)
return ret;
- ret = wl1271_init_templates_config(wl);
+ ret = wl1271_sta_init_templates_config(wl);
if (ret < 0)
return ret;
@@ -420,11 +520,20 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
+ /* FM WLAN coexistence */
+ ret = wl1271_acx_fm_coex(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
/* Energy detection */
ret = wl1271_init_energy_detection(wl);
if (ret < 0)
goto out_free_memmap;
+ ret = wl1271_acx_sta_mem_cfg(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
/* Default fragmentation threshold */
ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
if (ret < 0)
@@ -476,14 +585,73 @@ static int wl1271_plt_init(struct wl1271 *wl)
return ret;
}
+static void wl1271_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_blks)
+{
+ bool fw_ps;
+
+ /* only regulate station links */
+ if (hlid < WL1271_AP_STA_HLID_START)
+ return;
+
+ fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+
+ /*
+ * Wake up from high-level PS if the STA is asleep with too few
+ * blocks in FW or if the STA is awake.
+ */
+ if (!fw_ps || tx_blks < WL1271_PS_STA_MAX_BLOCKS)
+ wl1271_ps_link_end(wl, hlid);
+
+ /* Start high-level PS if the STA is asleep with enough blocks in FW */
+ else if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
+ wl1271_ps_link_start(wl, hlid, true);
+}
+
+static void wl1271_irq_update_links_status(struct wl1271 *wl,
+ struct wl1271_fw_ap_status *status)
+{
+ u32 cur_fw_ps_map;
+ u8 hlid;
+
+ cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
+ if (wl->ap_fw_ps_map != cur_fw_ps_map) {
+ wl1271_debug(DEBUG_PSM,
+ "link ps prev 0x%x cur 0x%x changed 0x%x",
+ wl->ap_fw_ps_map, cur_fw_ps_map,
+ wl->ap_fw_ps_map ^ cur_fw_ps_map);
+
+ wl->ap_fw_ps_map = cur_fw_ps_map;
+ }
+
+ for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
+ u8 cnt = status->tx_lnk_free_blks[hlid] -
+ wl->links[hlid].prev_freed_blks;
+
+ wl->links[hlid].prev_freed_blks =
+ status->tx_lnk_free_blks[hlid];
+ wl->links[hlid].allocated_blks -= cnt;
+
+ wl1271_irq_ps_regulate_link(wl, hlid,
+ wl->links[hlid].allocated_blks);
+ }
+}
+
static void wl1271_fw_status(struct wl1271 *wl,
- struct wl1271_fw_status *status)
+ struct wl1271_fw_full_status *full_status)
{
+ struct wl1271_fw_common_status *status = &full_status->common;
struct timespec ts;
- u32 total = 0;
+ u32 old_tx_blk_count = wl->tx_blocks_available;
+ u32 freed_blocks = 0;
int i;
- wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ wl1271_raw_read(wl, FW_STATUS_ADDR, status,
+ sizeof(struct wl1271_fw_ap_status), false);
+ } else {
+ wl1271_raw_read(wl, FW_STATUS_ADDR, status,
+ sizeof(struct wl1271_fw_sta_status), false);
+ }
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
@@ -494,17 +662,36 @@ static void wl1271_fw_status(struct wl1271 *wl,
/* update number of available TX blocks */
for (i = 0; i < NUM_TX_QUEUES; i++) {
- u32 cnt = le32_to_cpu(status->tx_released_blks[i]) -
- wl->tx_blocks_freed[i];
+ freed_blocks += le32_to_cpu(status->tx_released_blks[i]) -
+ wl->tx_blocks_freed[i];
wl->tx_blocks_freed[i] =
le32_to_cpu(status->tx_released_blks[i]);
- wl->tx_blocks_available += cnt;
- total += cnt;
+ }
+
+ wl->tx_allocated_blocks -= freed_blocks;
+
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ /* Update num of allocated TX blocks per link and ps status */
+ wl1271_irq_update_links_status(wl, &full_status->ap);
+ wl->tx_blocks_available += freed_blocks;
+ } else {
+ int avail = full_status->sta.tx_total - wl->tx_allocated_blocks;
+
+ /*
+ * The FW might change the total number of TX memblocks before
+ * we get a notification about blocks being released. Thus, the
+ * available blocks calculation might yield a temporary result
+ * which is lower than the actual available blocks. Keeping in
+ * mind that only blocks that were allocated can be moved from
+ * TX to RX, tx_blocks_available should never decrease here.
+ */
+ wl->tx_blocks_available = max((int)wl->tx_blocks_available,
+ avail);
}
/* if more blocks are available now, tx work can be scheduled */
- if (total)
+ if (wl->tx_blocks_available > old_tx_blk_count)
clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
/* update the host-chipset time offset */
@@ -513,16 +700,51 @@ static void wl1271_fw_status(struct wl1271 *wl,
(s64)le32_to_cpu(status->fw_localtime);
}
-#define WL1271_IRQ_MAX_LOOPS 10
+static void wl1271_flush_deferred_work(struct wl1271 *wl)
+{
+ struct sk_buff *skb;
-static void wl1271_irq_work(struct work_struct *work)
+ /* Pass all received frames to the network stack */
+ while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
+ ieee80211_rx_ni(wl->hw, skb);
+
+ /* Return sent skbs to the network stack */
+ while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
+ ieee80211_tx_status(wl->hw, skb);
+}
+
+static void wl1271_netstack_work(struct work_struct *work)
+{
+ struct wl1271 *wl =
+ container_of(work, struct wl1271, netstack_work);
+
+ do {
+ wl1271_flush_deferred_work(wl);
+ } while (skb_queue_len(&wl->deferred_rx_queue));
+}
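Editorial note: wl1271_netstack_work() is the consumer side of the two deferred queues; the producer side lives in the RX/TX completion paths, which are not part of this hunk. A hedged sketch of what that producer is assumed to look like:

/* Hedged sketch (assumed producer, outside this hunk): defer the skb
 * and let the netstack work hand it to mac80211 later. */
static void example_defer_rx_skb(struct wl1271 *wl, struct sk_buff *skb)
{
	skb_queue_tail(&wl->deferred_rx_queue, skb);
	ieee80211_queue_work(wl->hw, &wl->netstack_work);
}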
+
+#define WL1271_IRQ_MAX_LOOPS 256
+
+irqreturn_t wl1271_irq(int irq, void *cookie)
{
int ret;
u32 intr;
int loopcount = WL1271_IRQ_MAX_LOOPS;
+ struct wl1271 *wl = (struct wl1271 *)cookie;
+ bool done = false;
+ unsigned int defer_count;
unsigned long flags;
- struct wl1271 *wl =
- container_of(work, struct wl1271, irq_work);
+
+ /* TX might be handled here, avoid redundant work */
+ set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
+ cancel_work_sync(&wl->tx_work);
+
+ /*
+ * If an edge-triggered interrupt must be used, we cannot iterate
+ * more than once without introducing race conditions with the hardirq.
+ */
+ if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+ loopcount = 1;
mutex_lock(&wl->mutex);
@@ -531,26 +753,27 @@ static void wl1271_irq_work(struct work_struct *work)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, true);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- spin_lock_irqsave(&wl->wl_lock, flags);
- while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
- clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- loopcount--;
+ while (!done && loopcount--) {
+ /*
+ * In order to avoid a race with the hardirq, clear the flag
+ * before acknowledging the chip. Since the mutex is held,
+ * wl1271_ps_elp_wakeup cannot be called concurrently.
+ */
+ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+ smp_mb__after_clear_bit();
wl1271_fw_status(wl, wl->fw_status);
- intr = le32_to_cpu(wl->fw_status->intr);
+ intr = le32_to_cpu(wl->fw_status->common.intr);
+ intr &= WL1271_INTR_MASK;
if (!intr) {
- wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
- spin_lock_irqsave(&wl->wl_lock, flags);
+ done = true;
continue;
}
- intr &= WL1271_INTR_MASK;
-
if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
wl1271_error("watchdog interrupt received! "
"starting recovery.");
@@ -560,25 +783,35 @@ static void wl1271_irq_work(struct work_struct *work)
goto out;
}
- if (intr & WL1271_ACX_INTR_DATA) {
+ if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
- /* check for tx results */
- if (wl->fw_status->tx_results_counter !=
- (wl->tx_results_count & 0xff))
- wl1271_tx_complete(wl);
+ wl1271_rx(wl, &wl->fw_status->common);
/* Check if any tx blocks were freed */
+ spin_lock_irqsave(&wl->wl_lock, flags);
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
wl->tx_queue_count) {
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
* In order to avoid starvation of the TX path,
* call the work function directly.
*/
wl1271_tx_work_locked(wl);
+ } else {
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
}
- wl1271_rx(wl, wl->fw_status);
+ /* check for tx results */
+ if (wl->fw_status->common.tx_results_counter !=
+ (wl->tx_results_count & 0xff))
+ wl1271_tx_complete(wl);
+
+ /* Make sure the deferred queues don't get too long */
+ defer_count = skb_queue_len(&wl->deferred_tx_queue) +
+ skb_queue_len(&wl->deferred_rx_queue);
+ if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
+ wl1271_flush_deferred_work(wl);
}
if (intr & WL1271_ACX_INTR_EVENT_A) {
@@ -597,28 +830,54 @@ static void wl1271_irq_work(struct work_struct *work)
if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
-
- spin_lock_irqsave(&wl->wl_lock, flags);
}
- if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
- ieee80211_queue_work(wl->hw, &wl->irq_work);
- else
- clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
wl1271_ps_elp_sleep(wl);
out:
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ /* In case TX was not handled here, queue TX work */
+ clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
+ if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+ wl->tx_queue_count)
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
mutex_unlock(&wl->mutex);
+
+ return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(wl1271_irq);
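Editorial note: wl1271_irq() now returns irqreturn_t and is exported, which suggests it runs as the threaded half of a threaded interrupt registered by the bus glue. A hedged sketch of such a registration; the actual glue is not in this section, and the trigger flag is an assumption:

/* Hedged sketch: NULL hardirq handler, wl1271_irq as the thread
 * function; IRQF_ONESHOT is required when the hardirq handler is NULL. */
static int example_register_irq(struct wl1271 *wl)
{
	return request_threaded_irq(wl->irq, NULL, wl1271_irq,
				    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				    "wl1271", wl);
}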
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
const struct firmware *fw;
+ const char *fw_name;
int ret;
- ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl));
+ switch (wl->bss_type) {
+ case BSS_TYPE_AP_BSS:
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ fw_name = WL128X_AP_FW_NAME;
+ else
+ fw_name = WL127X_AP_FW_NAME;
+ break;
+ case BSS_TYPE_IBSS:
+ case BSS_TYPE_STA_BSS:
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ fw_name = WL128X_FW_NAME;
+ else
+ fw_name = WL1271_FW_NAME;
+ break;
+ default:
+ wl1271_error("no compatible firmware for bss_type %d",
+ wl->bss_type);
+ return -EINVAL;
+ }
+
+ wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
+
+ ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
if (ret < 0) {
wl1271_error("could not get firmware: %d", ret);
@@ -632,6 +891,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
goto out;
}
+ vfree(wl->fw);
wl->fw_len = fw->size;
wl->fw = vmalloc(wl->fw_len);
@@ -642,7 +902,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
}
memcpy(wl->fw, fw->data, wl->fw_len);
-
+ wl->fw_bss_type = wl->bss_type;
ret = 0;
out:
@@ -656,14 +916,14 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL1271_NVS_NAME, wl1271_wl_to_dev(wl));
+ ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
if (ret < 0) {
wl1271_error("could not get nvs file: %d", ret);
return ret;
}
- wl->nvs = kmemdup(fw->data, sizeof(struct wl1271_nvs_file), GFP_KERNEL);
+ wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!wl->nvs) {
wl1271_error("could not allocate memory for the nvs file");
@@ -768,17 +1028,33 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
wl->chip.id);
+ /* end-of-transaction flag should be set in wl127x AP mode */
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
+
+ ret = wl1271_setup(wl);
+ if (ret < 0)
+ goto out;
+ break;
+ case CHIP_ID_1283_PG20:
+ wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
+ wl->chip.id);
+
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
+ if (wl1271_set_block_size(wl))
+ wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT;
break;
+ case CHIP_ID_1283_PG10:
default:
wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
ret = -ENODEV;
goto out;
}
- if (wl->fw == NULL) {
+ /* Make sure the firmware type matches the BSS type */
+ if (wl->fw == NULL || wl->fw_bss_type != wl->bss_type) {
ret = wl1271_fetch_firmware(wl);
if (ret < 0)
goto out;
@@ -795,6 +1071,24 @@ out:
return ret;
}
+static unsigned int wl1271_get_fw_ver_quirks(struct wl1271 *wl)
+{
+ unsigned int quirks = 0;
+ unsigned int *fw_ver = wl->chip.fw_ver;
+
+ /* Only for wl127x */
+ if ((fw_ver[FW_VER_CHIP] == FW_VER_CHIP_WL127X) &&
+ /* Check STA version */
+ (((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
+ (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_STA_MIN)) ||
+ /* Check AP version */
+ ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP) &&
+ (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_AP_MIN))))
+ quirks |= WL12XX_QUIRK_USE_2_SPARE_BLOCKS;
+
+ return quirks;
+}
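Editorial note: the quirk computed above is only a flag; its consumer is the TX block accounting, which is outside this hunk. A hedged sketch of the assumed effect:

/* Hedged sketch: older wl127x firmware is assumed to need two spare
 * TX blocks per packet; newer firmware needs one. */
static u32 example_spare_blocks(struct wl1271 *wl)
{
	if (wl->quirks & WL12XX_QUIRK_USE_2_SPARE_BLOCKS)
		return 2;

	return 1;
}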
+
int wl1271_plt_start(struct wl1271 *wl)
{
int retries = WL1271_BOOT_RETRIES;
@@ -811,6 +1105,8 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
}
+ wl->bss_type = BSS_TYPE_STA_BSS;
+
while (retries) {
retries--;
ret = wl1271_chip_wakeup(wl);
@@ -827,11 +1123,13 @@ int wl1271_plt_start(struct wl1271 *wl)
wl->state = WL1271_STATE_PLT;
wl1271_notice("firmware booted in PLT mode (%s)",
- wl->chip.fw_ver);
+ wl->chip.fw_ver_str);
+
+ /* Check if any quirks are needed with older fw versions */
+ wl->quirks |= wl1271_get_fw_ver_quirks(wl);
goto out;
irq_disable:
- wl1271_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
/* Unlocking the mutex in the middle of handling is
inherently unsafe. In this case we deem it safe to do,
@@ -840,7 +1138,9 @@ irq_disable:
work function will not do anything.) Also, any other
possible concurrent operations will fail due to the
current state, hence the wl1271 struct should be safe. */
- cancel_work_sync(&wl->irq_work);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_work_sync(&wl->netstack_work);
mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
@@ -854,12 +1154,10 @@ out:
return ret;
}
-int wl1271_plt_stop(struct wl1271 *wl)
+static int __wl1271_plt_stop(struct wl1271 *wl)
{
int ret = 0;
- mutex_lock(&wl->mutex);
-
wl1271_notice("power down");
if (wl->state != WL1271_STATE_PLT) {
@@ -869,87 +1167,142 @@ int wl1271_plt_stop(struct wl1271 *wl)
goto out;
}
- wl1271_disable_interrupts(wl);
wl1271_power_off(wl);
wl->state = WL1271_STATE_OFF;
wl->rx_counter = 0;
-out:
mutex_unlock(&wl->mutex);
-
- cancel_work_sync(&wl->irq_work);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->recovery_work);
+ mutex_lock(&wl->mutex);
+out:
+ return ret;
+}
+
+int wl1271_plt_stop(struct wl1271 *wl)
+{
+ int ret;
+ mutex_lock(&wl->mutex);
+ ret = __wl1271_plt_stop(wl);
+ mutex_unlock(&wl->mutex);
return ret;
}
static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = txinfo->control.sta;
unsigned long flags;
int q;
+ u8 hlid = 0;
+
+ q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
+
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ hlid = wl1271_tx_get_hlid(skb);
- /*
- * peek into the rates configured in the STA entry.
- * The rates set after connection stage, The first block only BG sets:
- * the compare is for bit 0-16 of sta_rate_set. The second block add
- * HT rates in case of HT supported.
- */
spin_lock_irqsave(&wl->wl_lock, flags);
- if (sta &&
- (sta->supp_rates[conf->channel->band] !=
- (wl->sta_rate_set & HW_BG_RATES_MASK))) {
- wl->sta_rate_set = sta->supp_rates[conf->channel->band];
- set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
- }
-#ifdef CONFIG_WL12XX_HT
- if (sta &&
- sta->ht_cap.ht_supported &&
- ((wl->sta_rate_set >> HW_HT_RATES_OFFSET) !=
- sta->ht_cap.mcs.rx_mask[0])) {
- /* Clean MCS bits before setting them */
- wl->sta_rate_set &= HW_BG_RATES_MASK;
- wl->sta_rate_set |=
- (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
- set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
- }
-#endif
wl->tx_queue_count++;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ /*
+ * The workqueue is slow to process the tx_queue, so we need to stop
+ * the queue here; otherwise it will get too long.
+ */
+ if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
+ wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
+ ieee80211_stop_queues(wl->hw);
+ set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
+ }
/* queue the packet */
- q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
- skb_queue_tail(&wl->tx_queue[q], skb);
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+ skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+ } else {
+ skb_queue_tail(&wl->tx_queue[q], skb);
+ }
/*
* The chip specific setup must run before the first TX packet -
* before that, the tx_work will not be initialized!
*/
- if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
+ if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+ !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
ieee80211_queue_work(wl->hw, &wl->tx_work);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+int wl1271_tx_dummy_packet(struct wl1271 *wl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
+ wl->tx_queue_count++;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ /* The FW is low on RX memory blocks, so send the dummy packet asap */
+ if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
+ wl1271_tx_work_locked(wl);
+
/*
- * The workqueue is slow to process the tx_queue and we need stop
- * the queue here, otherwise the queue will get too long.
+ * If the FW TX is busy, TX work will be scheduled by the threaded
+ * interrupt handler function
*/
- if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
- wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
+ return 0;
+}
- spin_lock_irqsave(&wl->wl_lock, flags);
- ieee80211_stop_queues(wl->hw);
- set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
+/*
+ * The size of the dummy packet should be at least 1400 bytes. However, in
+ * order to minimize the number of bus transactions, aligning it to a
+ * 512-byte boundary can be beneficial, performance-wise.
+ */
+#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
+
+static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
+{
+ struct sk_buff *skb;
+ struct ieee80211_hdr_3addr *hdr;
+ unsigned int dummy_packet_size;
+
+ dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
+ sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
+
+ skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
+ if (!skb) {
+ wl1271_warning("Failed to allocate a dummy packet skb");
+ return NULL;
}
- return NETDEV_TX_OK;
+ skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
+
+ hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_TODS);
+
+ memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
+
+ /* Dummy packets require the TID to be management */
+ skb->priority = WL1271_TID_MGMT;
+
+ /* Initialize all fields that might be used */
+ skb_set_queue_mapping(skb, 0);
+ memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
+
+ return skb;
}
+
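Editorial note: ALIGN(1400, 512) rounds up to 1536 bytes, i.e. exactly three 512-byte SDIO blocks. A hedged build-time check of that arithmetic:

/* Hedged sketch: assert the dummy-packet size assumptions at build time. */
static inline void example_check_dummy_packet_size(void)
{
	BUILD_BUG_ON(TOTAL_TX_DUMMY_PACKET_SIZE != 1536);
	BUILD_BUG_ON(TOTAL_TX_DUMMY_PACKET_SIZE % 512);
}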
static struct notifier_block wl1271_dev_notifier = {
.notifier_call = wl1271_dev_notify,
};
@@ -967,6 +1320,9 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
*
* The MAC address is first known when the corresponding interface
* is added. That is where we will initialize the hardware.
+ *
+ * In addition, we currently have different firmwares for AP and managed
+ * operation. We will know which to boot according to interface type.
*/
return 0;
@@ -997,6 +1353,16 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
+ /*
+ * in some corner-case HW recovery scenarios it's possible to
+ * get here before __wl1271_op_remove_interface is complete, so
+ * opt out if that is the case.
+ */
+ if (test_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
switch (vif->type) {
case NL80211_IFTYPE_STATION:
wl->bss_type = BSS_TYPE_STA_BSS;
@@ -1006,6 +1372,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
wl->bss_type = BSS_TYPE_IBSS;
wl->set_bss_type = BSS_TYPE_STA_BSS;
break;
+ case NL80211_IFTYPE_AP:
+ wl->bss_type = BSS_TYPE_AP_BSS;
+ break;
default:
ret = -EOPNOTSUPP;
goto out;
@@ -1038,7 +1407,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
break;
irq_disable:
- wl1271_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
/* Unlocking the mutex in the middle of handling is
inherently unsafe. In this case we deem it safe to do,
@@ -1047,7 +1415,9 @@ irq_disable:
work function will not do anything.) Also, any other
possible concurrent operations will fail due to the
current state, hence the wl1271 struct should be safe. */
- cancel_work_sync(&wl->irq_work);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_work_sync(&wl->netstack_work);
mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
@@ -1061,13 +1431,17 @@ power_off:
wl->vif = vif;
wl->state = WL1271_STATE_ON;
- wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
+ set_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags);
+ wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
/* update hw/fw version info in wiphy struct */
wiphy->hw_version = wl->chip.id;
- strncpy(wiphy->fw_version, wl->chip.fw_ver,
+ strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
sizeof(wiphy->fw_version));
+ /* Check if any quirks are needed with older fw versions */
+ wl->quirks |= wl1271_get_fw_ver_quirks(wl);
+
/*
* Now we know if 11a is supported (info from the NVS), so disable
* 11a channels if not supported
@@ -1081,8 +1455,10 @@ power_off:
out:
mutex_unlock(&wl->mutex);
+ mutex_lock(&wl_list_mutex);
if (!ret)
list_add(&wl->list, &wl_list);
+ mutex_unlock(&wl_list_mutex);
return ret;
}
@@ -1093,11 +1469,15 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
+ /* because of hardware recovery, we may get here twice */
+ if (wl->state != WL1271_STATE_ON)
+ return;
+
wl1271_info("down");
+ mutex_lock(&wl_list_mutex);
list_del(&wl->list);
-
- WARN_ON(wl->state != WL1271_STATE_ON);
+ mutex_unlock(&wl_list_mutex);
/* enable dyn ps just in case (if left on due to fw crash etc) */
if (wl->bss_type == BSS_TYPE_STA_BSS)
@@ -1105,20 +1485,23 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
wl->scan.state = WL1271_SCAN_STATE_IDLE;
- kfree(wl->scan.scanned_ch);
- wl->scan.scanned_ch = NULL;
+ memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
}
+ /*
+ * this must be before the cancel_work calls below, so that the work
+ * functions don't perform further work.
+ */
wl->state = WL1271_STATE_OFF;
- wl1271_disable_interrupts(wl);
-
mutex_unlock(&wl->mutex);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
cancel_delayed_work_sync(&wl->scan_complete_work);
- cancel_work_sync(&wl->irq_work);
+ cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->pspoll_work);
cancel_delayed_work_sync(&wl->elp_work);
@@ -1140,6 +1523,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->tx_blocks_available = 0;
+ wl->tx_allocated_blocks = 0;
wl->tx_results_count = 0;
wl->tx_packets_count = 0;
wl->tx_security_last_seq = 0;
@@ -1147,10 +1531,19 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
wl->time_offset = 0;
wl->session_counter = 0;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
- wl->sta_rate_set = 0;
- wl->flags = 0;
wl->vif = NULL;
wl->filters = 0;
+ wl1271_free_ap_keys(wl);
+ memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
+ wl->ap_fw_ps_map = 0;
+ wl->ap_ps_map = 0;
+
+ /*
+ * this is performed after the cancel_work calls and the associated
+ * mutex_lock, so that wl1271_op_add_interface does not accidentally
+ * get executed before all these vars have been reset.
+ */
+ wl->flags = 0;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_blocks_freed[i] = 0;
@@ -1184,10 +1577,9 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
cancel_work_sync(&wl->recovery_work);
}
-static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
+void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
{
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+ wl1271_set_default_filters(wl);
/* combine requested filters with current filter config */
filters = wl->filters | filters;
@@ -1248,10 +1640,10 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
* One of the side effects of the JOIN command is that is clears
* WPA/WPA2 keys from the chipset. Performing a JOIN while associated
* to a WPA/WPA2 access point will therefore kill the data-path.
- * Currently there is no supported scenario for JOIN during
- * association - if it becomes a supported scenario, the WPA/WPA2 keys
- * must be handled somehow.
- *
+ * Currently the only valid scenario for JOIN during association
+ * is on roaming, in which case we will also be given new keys.
+ * Keep the below message for now, unless it starts bothering
+ * users who really like to roam a lot :)
*/
if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
wl1271_info("JOIN while associated.");
@@ -1307,7 +1699,7 @@ static int wl1271_unjoin(struct wl1271 *wl)
clear_bit(WL1271_FLAG_JOINED, &wl->flags);
memset(wl->bssid, 0, ETH_ALEN);
- /* stop filterting packets based on bssid */
+ /* stop filtering packets based on bssid */
wl1271_configure_filters(wl, FIF_OTHER_BSS);
out:
@@ -1322,25 +1714,7 @@ static void wl1271_set_band_rate(struct wl1271 *wl)
wl->basic_rate_set = wl->conf.tx.basic_rate_5;
}
-static u32 wl1271_min_rate_get(struct wl1271 *wl)
-{
- int i;
- u32 rate = 0;
-
- if (!wl->basic_rate_set) {
- WARN_ON(1);
- wl->basic_rate_set = wl->conf.tx.basic_rate;
- }
-
- for (i = 0; !rate; i++) {
- if ((wl->basic_rate_set >> i) & 0x1)
- rate = 1 << i;
- }
-
- return rate;
-}
-
-static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
+static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
{
int ret;
@@ -1350,9 +1724,8 @@ static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
if (ret < 0)
goto out;
}
- wl->rate_set = wl1271_min_rate_get(wl);
- wl->sta_rate_set = 0;
- ret = wl1271_acx_rate_policies(wl);
+ wl->rate_set = wl1271_tx_min_rate_get(wl);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
goto out;
ret = wl1271_acx_keep_alive_config(
@@ -1381,14 +1754,17 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
struct wl1271 *wl = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int channel, ret = 0;
+ bool is_ap;
channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
- wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s",
+ wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+ " changed 0x%x",
channel,
conf->flags & IEEE80211_CONF_PS ? "on" : "off",
conf->power_level,
- conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
+ conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
+ changed);
/*
* mac80211 will go to idle nearly immediately after transmitting some
@@ -1402,11 +1778,18 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF)) {
- ret = -EAGAIN;
+ /* we support configuring the channel and band while off */
+ if ((changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+ wl->band = conf->channel->band;
+ wl->channel = channel;
+ }
+
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl, false);
+ is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -1417,31 +1800,34 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
wl->band = conf->channel->band;
wl->channel = channel;
- /*
- * FIXME: the mac80211 should really provide a fixed rate
- * to use here. for now, just use the smallest possible rate
- * for the band as a fixed rate for association frames and
- * other control messages.
- */
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
- wl1271_set_band_rate(wl);
-
- wl->basic_rate = wl1271_min_rate_get(wl);
- ret = wl1271_acx_rate_policies(wl);
- if (ret < 0)
- wl1271_warning("rate policy for update channel "
- "failed %d", ret);
+ if (!is_ap) {
+ /*
+ * FIXME: the mac80211 should really provide a fixed
+ * rate to use here. for now, just use the smallest
+ * possible rate for the band as a fixed rate for
+ * association frames and other control messages.
+ */
+ if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ wl1271_set_band_rate(wl);
- if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
- ret = wl1271_join(wl, false);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
- wl1271_warning("cmd join to update channel "
+ wl1271_warning("rate policy for channel "
"failed %d", ret);
+
+ if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
+ ret = wl1271_join(wl, false);
+ if (ret < 0)
+ wl1271_warning("cmd join on channel "
+ "failed %d", ret);
+ }
}
}
- if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- ret = wl1271_handle_idle(wl, conf->flags & IEEE80211_CONF_IDLE);
+ if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
+ ret = wl1271_sta_handle_idle(wl,
+ conf->flags & IEEE80211_CONF_IDLE);
if (ret < 0)
wl1271_warning("idle mode change failed %d", ret);
}
@@ -1548,7 +1934,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
struct wl1271 *wl = hw->priv;
int ret;
- wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter");
+ wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
+ " total %x", changed, *total);
mutex_lock(&wl->mutex);
@@ -1558,19 +1945,20 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
-
- if (*total & FIF_ALLMULTI)
- ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
- else if (fp)
- ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
- fp->mc_list,
- fp->mc_list_length);
- if (ret < 0)
- goto out_sleep;
+ if (wl->bss_type != BSS_TYPE_AP_BSS) {
+ if (*total & FIF_ALLMULTI)
+ ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
+ else if (fp)
+ ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
+ fp->mc_list,
+ fp->mc_list_length);
+ if (ret < 0)
+ goto out_sleep;
+ }
/* determine, whether supported filter values have changed */
if (changed == 0)
@@ -1593,38 +1981,192 @@ out:
kfree(fp);
}
+static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
+{
+ struct wl1271_ap_key *ap_key;
+ int i;
+
+ wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
+
+ if (key_size > MAX_KEY_SIZE)
+ return -EINVAL;
+
+ /*
+ * Find next free entry in ap_keys. Also check we are not replacing
+ * an existing key.
+ */
+ for (i = 0; i < MAX_NUM_KEYS; i++) {
+ if (wl->recorded_ap_keys[i] == NULL)
+ break;
+
+ if (wl->recorded_ap_keys[i]->id == id) {
+ wl1271_warning("trying to record key replacement");
+ return -EINVAL;
+ }
+ }
+
+ if (i == MAX_NUM_KEYS)
+ return -EBUSY;
+
+ ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
+ if (!ap_key)
+ return -ENOMEM;
+
+ ap_key->id = id;
+ ap_key->key_type = key_type;
+ ap_key->key_size = key_size;
+ memcpy(ap_key->key, key, key_size);
+ ap_key->hlid = hlid;
+ ap_key->tx_seq_32 = tx_seq_32;
+ ap_key->tx_seq_16 = tx_seq_16;
+
+ wl->recorded_ap_keys[i] = ap_key;
+ return 0;
+}
+
+static void wl1271_free_ap_keys(struct wl1271 *wl)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_KEYS; i++) {
+ kfree(wl->recorded_ap_keys[i]);
+ wl->recorded_ap_keys[i] = NULL;
+ }
+}
+
+static int wl1271_ap_init_hwenc(struct wl1271 *wl)
+{
+ int i, ret = 0;
+ struct wl1271_ap_key *key;
+ bool wep_key_added = false;
+
+ for (i = 0; i < MAX_NUM_KEYS; i++) {
+ if (wl->recorded_ap_keys[i] == NULL)
+ break;
+
+ key = wl->recorded_ap_keys[i];
+ ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
+ key->id, key->key_type,
+ key->key_size, key->key,
+ key->hlid, key->tx_seq_32,
+ key->tx_seq_16);
+ if (ret < 0)
+ goto out;
+
+ if (key->key_type == KEY_WEP)
+ wep_key_added = true;
+ }
+
+ if (wep_key_added) {
+ ret = wl1271_cmd_set_ap_default_wep_key(wl, wl->default_key);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ wl1271_free_ap_keys(wl);
+ return ret;
+}
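Illustrative sketch (not part of the patch): wl1271_record_ap_key() and wl1271_ap_init_hwenc() above stash keys requested before the AP is started in a fixed-size table and program them in one pass once the BSS is up. The standalone C below mirrors that record-and-replay logic with made-up sizes and a printf standing in for the firmware command.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_NUM_KEYS 8   /* illustrative table size, not the firmware's */
#define MAX_KEY_LEN  32

struct recorded_key {
	unsigned char id;
	unsigned char len;
	unsigned char key[MAX_KEY_LEN];
};

static struct recorded_key *pending[MAX_NUM_KEYS];

/* Stash a key until the (hypothetical) AP is started. */
static int record_key(unsigned char id, const unsigned char *key,
		      unsigned char len)
{
	int i;

	if (len > MAX_KEY_LEN)
		return -1;

	/* find the first free slot; refuse replacements while recording */
	for (i = 0; i < MAX_NUM_KEYS; i++) {
		if (!pending[i])
			break;
		if (pending[i]->id == id)
			return -1;
	}
	if (i == MAX_NUM_KEYS)
		return -1;

	pending[i] = malloc(sizeof(*pending[i]));
	if (!pending[i])
		return -1;
	pending[i]->id = id;
	pending[i]->len = len;
	memcpy(pending[i]->key, key, len);
	return 0;
}

/* Replay every recorded key, then drop the table. */
static void replay_keys(void)
{
	for (int i = 0; i < MAX_NUM_KEYS && pending[i]; i++) {
		printf("programming key %u (%u bytes)\n",
		       pending[i]->id, pending[i]->len);
		free(pending[i]);
		pending[i] = NULL;
	}
}

int main(void)
{
	const unsigned char k[5] = { 1, 2, 3, 4, 5 };

	record_key(0, k, sizeof(k));
	record_key(1, k, sizeof(k));
	replay_keys();	/* would run when the AP is actually started */
	return 0;
}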
+
+static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u32 tx_seq_32,
+ u16 tx_seq_16, struct ieee80211_sta *sta)
+{
+ int ret;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
+ if (is_ap) {
+ struct wl1271_station *wl_sta;
+ u8 hlid;
+
+ if (sta) {
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ hlid = wl_sta->hlid;
+ } else {
+ hlid = WL1271_AP_BROADCAST_HLID;
+ }
+
+ if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ /*
+ * We do not support removing keys after AP shutdown.
+ * Pretend we do to make mac80211 happy.
+ */
+ if (action != KEY_ADD_OR_REPLACE)
+ return 0;
+
+ ret = wl1271_record_ap_key(wl, id,
+ key_type, key_size,
+ key, hlid, tx_seq_32,
+ tx_seq_16);
+ } else {
+ ret = wl1271_cmd_set_ap_key(wl, action,
+ id, key_type, key_size,
+ key, hlid, tx_seq_32,
+ tx_seq_16);
+ }
+
+ if (ret < 0)
+ return ret;
+ } else {
+ const u8 *addr;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
+ addr = sta ? sta->addr : bcast_addr;
+
+ if (is_zero_ether_addr(addr)) {
+ /* We don't support TX-only encryption */
+ return -EOPNOTSUPP;
+ }
+
+ /* The wl1271 does not allow removing unicast keys - they
+ will be cleared automatically on the next CMD_JOIN. Ignore the
+ request silently, as we don't want mac80211 to emit
+ an error message. */
+ if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
+ return 0;
+
+ ret = wl1271_cmd_set_sta_key(wl, action,
+ id, key_type, key_size,
+ key, addr, tx_seq_32,
+ tx_seq_16);
+ if (ret < 0)
+ return ret;
+
+ /* the default WEP key needs to be configured at least once */
+ if (key_type == KEY_WEP) {
+ ret = wl1271_cmd_set_sta_default_wep_key(wl,
+ wl->default_key);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf)
{
struct wl1271 *wl = hw->priv;
- const u8 *addr;
int ret;
u32 tx_seq_32 = 0;
u16 tx_seq_16 = 0;
u8 key_type;
- static const u8 bcast_addr[ETH_ALEN] =
- { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-
wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
- addr = sta ? sta->addr : bcast_addr;
-
- wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
- wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
+ wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
key_conf->cipher, key_conf->keyidx,
key_conf->keylen, key_conf->flags);
wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
- if (is_zero_ether_addr(addr)) {
- /* We dont support TX only encryption */
- ret = -EOPNOTSUPP;
- goto out;
- }
-
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF)) {
@@ -1632,7 +2174,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto out_unlock;
}
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
@@ -1671,36 +2213,21 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
- ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE,
- key_conf->keyidx, key_type,
- key_conf->keylen, key_conf->key,
- addr, tx_seq_32, tx_seq_16);
+ ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
+ key_conf->keyidx, key_type,
+ key_conf->keylen, key_conf->key,
+ tx_seq_32, tx_seq_16, sta);
if (ret < 0) {
wl1271_error("Could not add or replace key");
goto out_sleep;
}
-
- /* the default WEP key needs to be configured at least once */
- if (key_type == KEY_WEP) {
- ret = wl1271_cmd_set_default_wep_key(wl,
- wl->default_key);
- if (ret < 0)
- goto out_sleep;
- }
break;
case DISABLE_KEY:
- /* The wl1271 does not allow to remove unicast keys - they
- will be cleared automatically on next CMD_JOIN. Ignore the
- request silently, as we dont want the mac80211 to emit
- an error message. */
- if (!is_broadcast_ether_addr(addr))
- break;
-
- ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
- key_conf->keyidx, key_type,
- key_conf->keylen, key_conf->key,
- addr, 0, 0);
+ ret = wl1271_set_key(wl, KEY_REMOVE,
+ key_conf->keyidx, key_type,
+ key_conf->keylen, key_conf->key,
+ 0, 0, sta);
if (ret < 0) {
wl1271_error("Could not remove key");
goto out_sleep;
@@ -1719,7 +2246,6 @@ out_sleep:
out_unlock:
mutex_unlock(&wl->mutex);
-out:
return ret;
}
@@ -1751,7 +2277,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -1777,7 +2303,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -1805,7 +2331,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -1821,7 +2347,7 @@ out:
return ret;
}
-static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
+static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
int offset)
{
u8 *ptr = skb->data + offset;
@@ -1831,89 +2357,231 @@ static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
if (ptr[0] == WLAN_EID_SSID) {
wl->ssid_len = ptr[1];
memcpy(wl->ssid, ptr+2, wl->ssid_len);
- return;
+ return 0;
}
ptr += (ptr[1] + 2);
}
+
wl1271_error("No SSID in IEs!\n");
+ return -ENOENT;
}
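Illustrative sketch (not part of the patch): wl1271_ssid_set() above walks the beacon's information elements, which are packed as (id, length, value) triplets, and now returns -ENOENT if no SSID element is found. The standalone parser below shows the same walk over a hypothetical IE blob; the SSID element id is 0.

#include <stddef.h>
#include <stdio.h>

#define EID_SSID 0	/* the SSID information element id */

/* Return a pointer to the SSID IE inside an (id, len, value) blob, or NULL. */
static const unsigned char *find_ssid_ie(const unsigned char *ies, size_t len)
{
	size_t pos = 0;

	while (pos + 2 <= len) {
		unsigned char eid  = ies[pos];
		unsigned char elen = ies[pos + 1];

		if (pos + 2 + elen > len)
			return NULL;	/* truncated element */
		if (eid == EID_SSID)
			return &ies[pos];
		pos += 2 + elen;	/* skip header and payload */
	}
	return NULL;
}

int main(void)
{
	/* hypothetical blob: SSID "net" followed by a one-byte rates IE */
	const unsigned char ies[] = { 0, 3, 'n', 'e', 't', 1, 1, 0x82 };
	const unsigned char *ssid = find_ssid_ie(ies, sizeof(ies));

	if (ssid)
		printf("SSID length %u\n", ssid[1]);
	return 0;
}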
-static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
+static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- enum wl1271_cmd_ps_mode mode;
- struct wl1271 *wl = hw->priv;
- struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
- bool do_join = false;
- bool set_assoc = false;
- int ret;
+ int ret = 0;
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (bss_conf->use_short_slot)
+ ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
+ else
+ ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
+ if (ret < 0) {
+ wl1271_warning("Set slot time failed %d", ret);
+ goto out;
+ }
+ }
- wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ if (bss_conf->use_short_preamble)
+ wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
+ else
+ wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
+ }
- mutex_lock(&wl->mutex);
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ if (bss_conf->use_cts_prot)
+ ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
+ else
+ ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
+ if (ret < 0) {
+ wl1271_warning("Set ctsprotect failed %d", ret);
+ goto out;
+ }
+ }
- if (unlikely(wl->state == WL1271_STATE_OFF))
- goto out;
+out:
+ return ret;
+}
- ret = wl1271_ps_elp_wakeup(wl, false);
- if (ret < 0)
- goto out;
+static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ int ret = 0;
- if ((changed & BSS_CHANGED_BEACON_INT) &&
- (wl->bss_type == BSS_TYPE_IBSS)) {
- wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d",
+ if ((changed & BSS_CHANGED_BEACON_INT)) {
+ wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
bss_conf->beacon_int);
wl->beacon_int = bss_conf->beacon_int;
- do_join = true;
}
- if ((changed & BSS_CHANGED_BEACON) &&
- (wl->bss_type == BSS_TYPE_IBSS)) {
- struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+ if ((changed & BSS_CHANGED_BEACON)) {
+ struct ieee80211_hdr *hdr;
+ int ieoffset = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
+ u16 tmpl_id;
- wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon updated");
+ if (!beacon)
+ goto out;
- if (beacon) {
- struct ieee80211_hdr *hdr;
- int ieoffset = offsetof(struct ieee80211_mgmt,
- u.beacon.variable);
+ wl1271_debug(DEBUG_MASTER, "beacon updated");
- wl1271_ssid_set(wl, beacon, ieoffset);
+ ret = wl1271_ssid_set(wl, beacon, ieoffset);
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out;
+ }
+ tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
+ CMD_TEMPL_BEACON;
+ ret = wl1271_cmd_template_set(wl, tmpl_id,
+ beacon->data,
+ beacon->len, 0,
+ wl1271_tx_min_rate_get(wl));
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out;
+ }
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
- beacon->data,
- beacon->len, 0,
- wl1271_min_rate_get(wl));
+ hdr = (struct ieee80211_hdr *) beacon->data;
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_RESP);
+
+ tmpl_id = is_ap ? CMD_TEMPL_AP_PROBE_RESPONSE :
+ CMD_TEMPL_PROBE_RESPONSE;
+ ret = wl1271_cmd_template_set(wl,
+ tmpl_id,
+ beacon->data,
+ beacon->len, 0,
+ wl1271_tx_min_rate_get(wl));
+ dev_kfree_skb(beacon);
+ if (ret < 0)
+ goto out;
+ }
- if (ret < 0) {
- dev_kfree_skb(beacon);
- goto out_sleep;
+out:
+ return ret;
+}
+
+/* AP mode changes */
+static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ int ret = 0;
+
+ if ((changed & BSS_CHANGED_BASIC_RATES)) {
+ u32 rates = bss_conf->basic_rates;
+ struct conf_tx_rate_class mgmt_rc;
+
+ wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ wl1271_debug(DEBUG_AP, "basic rates: 0x%x",
+ wl->basic_rate_set);
+
+ /* update the AP management rate policy with the new rates */
+ mgmt_rc.enabled_rates = wl->basic_rate_set;
+ mgmt_rc.long_retry_limit = 10;
+ mgmt_rc.short_retry_limit = 10;
+ mgmt_rc.aflags = 0;
+ ret = wl1271_acx_ap_rate_policy(wl, &mgmt_rc,
+ ACX_TX_AP_MODE_MGMT_RATE);
+ if (ret < 0) {
+ wl1271_error("AP mgmt policy change failed %d", ret);
+ goto out;
+ }
+ }
+
+ ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
+ if (ret < 0)
+ goto out;
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
+ if (bss_conf->enable_beacon) {
+ if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ ret = wl1271_cmd_start_bss(wl);
+ if (ret < 0)
+ goto out;
+
+ set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ wl1271_debug(DEBUG_AP, "started AP");
+
+ ret = wl1271_ap_init_hwenc(wl);
+ if (ret < 0)
+ goto out;
}
+ } else {
+ if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ ret = wl1271_cmd_stop_bss(wl);
+ if (ret < 0)
+ goto out;
+
+ clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ wl1271_debug(DEBUG_AP, "stopped AP");
+ }
+ }
+ }
- hdr = (struct ieee80211_hdr *) beacon->data;
- hdr->frame_control = cpu_to_le16(
- IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_PROBE_RESP);
+ if (changed & BSS_CHANGED_IBSS) {
+ wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
+ bss_conf->ibss_joined);
- ret = wl1271_cmd_template_set(wl,
- CMD_TEMPL_PROBE_RESPONSE,
- beacon->data,
- beacon->len, 0,
- wl1271_min_rate_get(wl));
- dev_kfree_skb(beacon);
- if (ret < 0)
- goto out_sleep;
+ if (bss_conf->ibss_joined) {
+ u32 rates = bss_conf->basic_rates;
+ wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
+ rates);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
- /* Need to update the SSID (for filtering etc) */
- do_join = true;
+ /* by default, use 11b rates */
+ wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
+ ret = wl1271_acx_sta_rate_policies(wl);
+ if (ret < 0)
+ goto out;
}
}
- if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
- (wl->bss_type == BSS_TYPE_IBSS)) {
+ ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ if (ret < 0)
+ goto out;
+out:
+ return;
+}
+
+/* STA/IBSS mode changes */
+static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ bool do_join = false, set_assoc = false;
+ bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+ u32 sta_rate_set = 0;
+ int ret;
+ struct ieee80211_sta *sta;
+ bool sta_exists = false;
+ struct ieee80211_sta_ht_cap sta_ht_cap;
+
+ if (is_ibss) {
+ ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
+ changed);
+ if (ret < 0)
+ goto out;
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_INT) && is_ibss)
+ do_join = true;
+
+ /* Need to update the SSID (for filtering etc) */
+ if ((changed & BSS_CHANGED_BEACON) && is_ibss)
+ do_join = true;
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) && is_ibss) {
wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
bss_conf->enable_beacon ? "enabled" : "disabled");
@@ -1924,7 +2592,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
do_join = true;
}
- if (changed & BSS_CHANGED_CQM) {
+ if ((changed & BSS_CHANGED_CQM)) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
enable = true;
@@ -1942,24 +2610,70 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
* and enable the BSSID filter
*/
memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
- memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+ memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+ if (!is_zero_ether_addr(wl->bssid)) {
ret = wl1271_cmd_build_null_data(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
ret = wl1271_build_qos_null_data(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* filter out all packets not from this BSSID */
wl1271_configure_filters(wl, 0);
/* Need to update the BSSID (for filtering etc) */
do_join = true;
+ }
}
- if (changed & BSS_CHANGED_ASSOC) {
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (sta) {
+ /* save the supp_rates of the ap */
+ sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
+ if (sta->ht_cap.ht_supported)
+ sta_rate_set |=
+ (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
+ sta_ht_cap = sta->ht_cap;
+ sta_exists = true;
+ }
+ rcu_read_unlock();
+
+ if (sta_exists) {
+ /* handle new association with HT and HT information change */
+ if ((changed & BSS_CHANGED_HT) &&
+ (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
+ ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
+ true);
+ if (ret < 0) {
+ wl1271_warning("Set ht cap true failed %d",
+ ret);
+ goto out;
+ }
+ ret = wl1271_acx_set_ht_information(wl,
+ bss_conf->ht_operation_mode);
+ if (ret < 0) {
+ wl1271_warning("Set ht information failed %d",
+ ret);
+ goto out;
+ }
+ }
+ /* handle new association without HT and disassociation */
+ else if (changed & BSS_CHANGED_ASSOC) {
+ ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
+ false);
+ if (ret < 0) {
+ wl1271_warning("Set ht cap false failed %d",
+ ret);
+ goto out;
+ }
+ }
+ }
+
+ if ((changed & BSS_CHANGED_ASSOC)) {
if (bss_conf->assoc) {
u32 rates;
int ieoffset;
@@ -1975,10 +2689,13 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
rates = bss_conf->basic_rates;
wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
rates);
- wl->basic_rate = wl1271_min_rate_get(wl);
- ret = wl1271_acx_rate_policies(wl);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ if (sta_rate_set)
+ wl->rate_set = wl1271_tx_enabled_rates_get(wl,
+ sta_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
/*
* with wl1271, we don't need to update the
@@ -1988,7 +2705,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
*/
ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
if (ret < 0)
- goto out_sleep;
+ goto out;
/*
* Get a template for hardware connection maintenance
@@ -2002,22 +2719,26 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
/* enable the connection monitoring feature */
ret = wl1271_acx_conn_monit_params(wl, true);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* If we want to go in PSM but we're not there yet */
if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ enum wl1271_cmd_ps_mode mode;
+
mode = STATION_POWER_SAVE_MODE;
ret = wl1271_ps_set_mode(wl, mode,
wl->basic_rate,
true);
if (ret < 0)
- goto out_sleep;
+ goto out;
}
} else {
/* use defaults when not associated */
+ bool was_assoc =
+ !!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED,
+ &wl->flags);
clear_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags);
- clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
wl->aid = 0;
/* free probe-request template */
@@ -2029,10 +2750,10 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
/* revert back to minimum rates for the current band */
wl1271_set_band_rate(wl);
- wl->basic_rate = wl1271_min_rate_get(wl);
- ret = wl1271_acx_rate_policies(wl);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* disable connection monitor features */
ret = wl1271_acx_conn_monit_params(wl, false);
@@ -2040,74 +2761,19 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
/* Disable the keep-alive feature */
ret = wl1271_acx_keep_alive_mode(wl, false);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* restore the bssid filter and go to dummy bssid */
- wl1271_unjoin(wl);
- wl1271_dummy_join(wl);
- }
-
- }
-
- if (changed & BSS_CHANGED_ERP_SLOT) {
- if (bss_conf->use_short_slot)
- ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
- else
- ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
- if (ret < 0) {
- wl1271_warning("Set slot time failed %d", ret);
- goto out_sleep;
- }
- }
-
- if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- if (bss_conf->use_short_preamble)
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
- else
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
- }
-
- if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- if (bss_conf->use_cts_prot)
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
- else
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
- if (ret < 0) {
- wl1271_warning("Set ctsprotect failed %d", ret);
- goto out_sleep;
+ if (was_assoc) {
+ wl1271_unjoin(wl);
+ wl1271_dummy_join(wl);
+ }
}
}
- /*
- * Takes care of: New association with HT enable,
- * HT information change in beacon.
- */
- if (sta &&
- (changed & BSS_CHANGED_HT) &&
- (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
- ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true);
- if (ret < 0) {
- wl1271_warning("Set ht cap true failed %d", ret);
- goto out_sleep;
- }
- ret = wl1271_acx_set_ht_information(wl,
- bss_conf->ht_operation_mode);
- if (ret < 0) {
- wl1271_warning("Set ht information failed %d", ret);
- goto out_sleep;
- }
- }
- /*
- * Takes care of: New association without HT,
- * Disassociation.
- */
- else if (sta && (changed & BSS_CHANGED_ASSOC)) {
- ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, false);
- if (ret < 0) {
- wl1271_warning("Set ht cap false failed %d", ret);
- goto out_sleep;
- }
- }
+ ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ if (ret < 0)
+ goto out;
if (changed & BSS_CHANGED_ARP_FILTER) {
__be32 addr = bss_conf->arp_addr_list[0];
@@ -2124,29 +2790,57 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
ret = wl1271_cmd_build_arp_rsp(wl, addr);
if (ret < 0) {
wl1271_warning("build arp rsp failed: %d", ret);
- goto out_sleep;
+ goto out;
}
ret = wl1271_acx_arp_ip_filter(wl,
- (ACX_ARP_FILTER_ARP_FILTERING |
- ACX_ARP_FILTER_AUTO_ARP),
+ ACX_ARP_FILTER_ARP_FILTERING,
addr);
} else
ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
if (ret < 0)
- goto out_sleep;
+ goto out;
}
if (do_join) {
ret = wl1271_join(wl, set_assoc);
if (ret < 0) {
wl1271_warning("cmd join failed %d", ret);
- goto out_sleep;
+ goto out;
}
}
-out_sleep:
+out:
+ return;
+}
+
+static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct wl1271 *wl = hw->priv;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ int ret;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
+ (int)changed);
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ if (is_ap)
+ wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
+ else
+ wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
+
wl1271_ps_elp_sleep(wl);
out:
@@ -2158,39 +2852,62 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
{
struct wl1271 *wl = hw->priv;
u8 ps_scheme;
- int ret;
+ int ret = 0;
mutex_lock(&wl->mutex);
wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
- if (unlikely(wl->state == WL1271_STATE_OFF)) {
- ret = -EAGAIN;
+ if (params->uapsd)
+ ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
+ else
+ ps_scheme = CONF_PS_SCHEME_LEGACY;
+
+ if (wl->state == WL1271_STATE_OFF) {
+ /*
+ * If the state is off, the parameters will be recorded and
+ * configured on init. This happens in AP-mode.
+ */
+ struct conf_tx_ac_category *conf_ac =
+ &wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)];
+ struct conf_tx_tid *conf_tid =
+ &wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)];
+
+ conf_ac->ac = wl1271_tx_get_queue(queue);
+ conf_ac->cw_min = (u8)params->cw_min;
+ conf_ac->cw_max = params->cw_max;
+ conf_ac->aifsn = params->aifs;
+ conf_ac->tx_op_limit = params->txop << 5;
+
+ conf_tid->queue_id = wl1271_tx_get_queue(queue);
+ conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF;
+ conf_tid->tsid = wl1271_tx_get_queue(queue);
+ conf_tid->ps_scheme = ps_scheme;
+ conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY;
+ conf_tid->apsd_conf[0] = 0;
+ conf_tid->apsd_conf[1] = 0;
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- /* the txop is confed in units of 32us by the mac80211, we need us */
+ /*
+ * the txop is configured by mac80211 in units of 32us,
+ * but we need it in us
+ */
ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
params->cw_min, params->cw_max,
params->aifs, params->txop << 5);
if (ret < 0)
goto out_sleep;
- if (params->uapsd)
- ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
- else
- ps_scheme = CONF_PS_SCHEME_LEGACY;
-
ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
CONF_CHANNEL_TYPE_EDCF,
wl1271_tx_get_queue(queue),
- ps_scheme, CONF_ACK_POLICY_LEGACY, 0, 0);
- if (ret < 0)
- goto out_sleep;
+ ps_scheme, CONF_ACK_POLICY_LEGACY,
+ 0, 0);
out_sleep:
wl1271_ps_elp_sleep(wl);
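Illustrative sketch (not part of the patch): the conf_tx path above converts the TXOP limit that mac80211 reports in 32-microsecond units into plain microseconds with params->txop << 5. A minimal worked example of that conversion:

#include <stdio.h>
#include <stdint.h>

/* mac80211 hands the TXOP limit in 32us units; the firmware wants microseconds. */
static uint32_t txop_to_us(uint16_t txop_32us)
{
	return (uint32_t)txop_32us << 5;	/* << 5 is the same as * 32 */
}

int main(void)
{
	/* 94 units of 32us is the 3.008 ms limit commonly used for video traffic */
	printf("%u us\n", txop_to_us(94));	/* prints 3008 us */
	return 0;
}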
@@ -2215,7 +2932,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -2247,6 +2964,223 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
+static int wl1271_allocate_sta(struct wl1271 *wl,
+ struct ieee80211_sta *sta,
+ u8 *hlid)
+{
+ struct wl1271_station *wl_sta;
+ int id;
+
+ id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
+ if (id >= AP_MAX_STATIONS) {
+ wl1271_warning("could not allocate HLID - too much stations");
+ return -EBUSY;
+ }
+
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ __set_bit(id, wl->ap_hlid_map);
+ wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
+ *hlid = wl_sta->hlid;
+ memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
+ return 0;
+}
+
+static void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
+{
+ int id = hlid - WL1271_AP_STA_HLID_START;
+
+ if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+ return;
+
+ __clear_bit(id, wl->ap_hlid_map);
+ memset(wl->links[hlid].addr, 0, ETH_ALEN);
+ wl1271_tx_reset_link_queues(wl, hlid);
+ __clear_bit(hlid, &wl->ap_ps_map);
+ __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+}
+
+bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
+{
+ int id = hlid - WL1271_AP_STA_HLID_START;
+ return test_bit(id, wl->ap_hlid_map);
+}
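Illustrative sketch (not part of the patch): wl1271_allocate_sta() and wl1271_free_sta() above hand out per-station link ids by finding the first clear bit in a bitmap and offsetting it by the first station HLID. The sketch below reproduces that allocation scheme in plain C; the limit and the start offset are assumptions, not the chip's values.

#include <stdio.h>

#define MAX_STATIONS   8	/* assumed station limit */
#define STA_HLID_START 3	/* assumed first HLID reserved for stations */

static unsigned int hlid_map;	/* one bit per allocated station slot */

static int alloc_hlid(void)
{
	for (int id = 0; id < MAX_STATIONS; id++) {
		if (!(hlid_map & (1u << id))) {
			hlid_map |= 1u << id;
			return STA_HLID_START + id;
		}
	}
	return -1;	/* all slots busy */
}

static void free_hlid(int hlid)
{
	hlid_map &= ~(1u << (hlid - STA_HLID_START));
}

int main(void)
{
	int a = alloc_hlid();
	int b = alloc_hlid();

	printf("allocated %d and %d\n", a, b);	/* 3 and 4 */
	free_hlid(a);
	printf("reused %d\n", alloc_hlid());	/* 3 again */
	return 0;
}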
+
+static int wl1271_op_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wl1271 *wl = hw->priv;
+ int ret = 0;
+ u8 hlid;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS)
+ goto out;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
+
+ ret = wl1271_allocate_sta(wl, sta, &hlid);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out_free_sta;
+
+ ret = wl1271_cmd_add_sta(wl, sta, hlid);
+ if (ret < 0)
+ goto out_sleep;
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+
+out_free_sta:
+ if (ret < 0)
+ wl1271_free_sta(wl, hlid);
+
+out:
+ mutex_unlock(&wl->mutex);
+ return ret;
+}
+
+static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl1271_station *wl_sta;
+ int ret = 0, id;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS)
+ goto out;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
+
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
+ if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_cmd_remove_sta(wl, wl_sta->hlid);
+ if (ret < 0)
+ goto out_sleep;
+
+ wl1271_free_sta(wl, wl_sta->hlid);
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+ return ret;
+}
+
+/*
+ * int (*ampdu_action)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+*/
+
+static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn
+#if 0
+ , u8 buf_size
+#endif
+ )
+{
+ struct wl1271 *wl = hw->priv;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ if (wl->ba_support) {
+ ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn,
+ true);
+ if (!ret)
+ wl->ba_rx_bitmap |= BIT(tid);
+ } else {
+ ret = -ENOTSUPP;
+ }
+ break;
+
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = wl1271_acx_set_ba_receiver_session(wl, tid, 0, false);
+ if (!ret)
+ wl->ba_rx_bitmap &= ~BIT(tid);
+ break;
+
+ /*
+ * The BA initiator session is managed in the FW independently.
+ * Fall through here on purpose for all TX AMPDU commands.
+ */
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = -EINVAL;
+ break;
+
+ default:
+ wl1271_error("Incorrect ampdu action id=%x\n", action);
+ ret = -EINVAL;
+ }
+
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
+static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
+{
+ struct wl1271 *wl = hw->priv;
+ bool ret = false;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ /* packets are considered pending if in the TX queue or the FW */
+ ret = (wl->tx_queue_count > 0) || (wl->tx_frames_cnt > 0);
+
+ /* the above is appropriate for STA mode for PS purposes */
+ WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
+
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
/* can't be const, mac80211 writes to this */
static struct ieee80211_rate wl1271_rates[] = {
{ .bitrate = 10,
@@ -2305,6 +3239,7 @@ static struct ieee80211_channel wl1271_channels[] = {
{ .hw_value = 11, .center_freq = 2462, .max_power = 25 },
{ .hw_value = 12, .center_freq = 2467, .max_power = 25 },
{ .hw_value = 13, .center_freq = 2472, .max_power = 25 },
+ { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
};
/* mapping to indexes for wl1271_rates */
@@ -2342,7 +3277,8 @@ static const u8 wl1271_rate_to_idx_2ghz[] = {
#ifdef CONFIG_WL12XX_HT
#define WL12XX_HT_CAP { \
- .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20, \
+ .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | \
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT), \
.ht_supported = true, \
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, \
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
@@ -2493,6 +3429,12 @@ static const struct ieee80211_ops wl1271_ops = {
.conf_tx = wl1271_op_conf_tx,
.get_tsf = wl1271_op_get_tsf,
.get_survey = wl1271_op_get_survey,
+ .sta_add = wl1271_op_sta_add,
+ .sta_remove = wl1271_op_sta_remove,
+ .ampdu_action = wl1271_op_ampdu_action,
+#if 0
+ .tx_frames_pending = wl1271_tx_frames_pending,
+#endif
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
@@ -2543,8 +3485,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
unsigned long res;
int ret;
- ret = strict_strtoul(buf, 10, &res);
-
+ ret = kstrtoul(buf, 10, &res);
if (ret < 0) {
wl1271_warning("incorrect value written to bt_coex_mode");
return count;
@@ -2562,7 +3503,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
if (wl->state == WL1271_STATE_OFF)
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -2607,6 +3548,22 @@ int wl1271_register_hw(struct wl1271 *wl)
if (wl->mac80211_registered)
return 0;
+ ret = wl1271_fetch_nvs(wl);
+ if (ret == 0) {
+ /* NOTE: The wl->nvs->nvs element must be first, in
+ * order to simplify the casting, we assume it is at
+ * the beginning of the wl->nvs structure.
+ */
+ u8 *nvs_ptr = (u8 *)wl->nvs;
+
+ wl->mac_addr[0] = nvs_ptr[11];
+ wl->mac_addr[1] = nvs_ptr[10];
+ wl->mac_addr[2] = nvs_ptr[6];
+ wl->mac_addr[3] = nvs_ptr[5];
+ wl->mac_addr[4] = nvs_ptr[4];
+ wl->mac_addr[5] = nvs_ptr[3];
+ }
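Illustrative sketch (not part of the patch): the block above derives the permanent MAC address from the NVS image by picking bytes 11, 10 and 6 down to 3, i.e. the address is stored in byte-reversed chunks. The sample buffer below is invented; only the offsets mirror the patch.

#include <stdio.h>

/* Reassemble a MAC address from an NVS-style buffer using the patch's offsets. */
static void mac_from_nvs(const unsigned char *nvs, unsigned char mac[6])
{
	mac[0] = nvs[11];
	mac[1] = nvs[10];
	mac[2] = nvs[6];
	mac[3] = nvs[5];
	mac[4] = nvs[4];
	mac[5] = nvs[3];
}

int main(void)
{
	/* made-up NVS bytes; only offsets 3..6 and 10..11 matter here */
	unsigned char nvs[16] = {
		0, 0, 0, 0xef, 0xcd, 0xab, 0x89, 0, 0, 0, 0x67, 0x45,
	};
	unsigned char mac[6];

	mac_from_nvs(nvs, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}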
+
SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
ret = ieee80211_register_hw(wl->hw);
@@ -2629,6 +3586,9 @@ EXPORT_SYMBOL_GPL(wl1271_register_hw);
void wl1271_unregister_hw(struct wl1271 *wl)
{
+ if (wl->state == WL1271_STATE_PLT)
+ __wl1271_plt_stop(wl);
+
unregister_netdevice_notifier(&wl1271_dev_notifier);
ieee80211_unregister_hw(wl->hw);
wl->mac80211_registered = false;
@@ -2661,13 +3621,17 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_SUPPORTS_UAPSD |
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_CONNECTION_MONITOR |
- IEEE80211_HW_SUPPORTS_CQM_RSSI;
+ IEEE80211_HW_SUPPORTS_CQM_RSSI
+#if 0
+ | IEEE80211_HW_AP_LINK_PS
+#endif
+ ;
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
+ BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
wl->hw->wiphy->max_scan_ssids = 1;
/*
* Maximum length of elements in scanning probe request templates
@@ -2676,8 +3640,24 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
*/
wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
sizeof(struct ieee80211_header);
- wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
- wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
+
+ /* make sure all our channels fit in the scanned_ch bitmask */
+ BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
+ ARRAY_SIZE(wl1271_channels_5ghz) >
+ WL1271_MAX_CHANNELS);
+ /*
+ * We keep local copies of the band structs because we need to
+ * modify them on a per-device basis.
+ */
+ memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
+ sizeof(wl1271_band_2ghz));
+ memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
+ sizeof(wl1271_band_5ghz));
+
+ wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &wl->bands[IEEE80211_BAND_2GHZ];
+ wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &wl->bands[IEEE80211_BAND_5GHZ];
wl->hw->queues = 4;
wl->hw->max_rates = 1;
@@ -2686,6 +3666,10 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
+ wl->hw->sta_data_size = sizeof(struct wl1271_station);
+#if 0
+ wl->hw->max_rx_aggregation_subframes = 8;
+#endif
return 0;
}
EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
@@ -2697,7 +3681,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
struct ieee80211_hw *hw;
struct platform_device *plat_dev = NULL;
struct wl1271 *wl;
- int i, ret;
+ int i, j, ret;
unsigned int order;
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
@@ -2725,9 +3709,16 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
for (i = 0; i < NUM_TX_QUEUES; i++)
skb_queue_head_init(&wl->tx_queue[i]);
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ for (j = 0; j < AP_MAX_LINKS; j++)
+ skb_queue_head_init(&wl->links[j].tx_queue[i]);
+
+ skb_queue_head_init(&wl->deferred_rx_queue);
+ skb_queue_head_init(&wl->deferred_tx_queue);
+
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
- INIT_WORK(&wl->irq_work, wl1271_irq_work);
+ INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
@@ -2735,19 +3726,26 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
wl->default_key = 0;
wl->rx_counter = 0;
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+ wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
- wl->sta_rate_set = 0;
wl->band = IEEE80211_BAND_2GHZ;
wl->vif = NULL;
wl->flags = 0;
wl->sg_enabled = true;
wl->hw_pg_ver = -1;
+ wl->bss_type = MAX_BSS_TYPE;
+ wl->set_bss_type = MAX_BSS_TYPE;
+ wl->fw_bss_type = MAX_BSS_TYPE;
+ wl->last_tx_hlid = 0;
+ wl->ap_ps_map = 0;
+ wl->ap_fw_ps_map = 0;
+ wl->quirks = 0;
+ wl->platform_quirks = 0;
memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
@@ -2768,11 +3766,17 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
goto err_hw;
}
+ wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
+ if (!wl->dummy_packet) {
+ ret = -ENOMEM;
+ goto err_aggr;
+ }
+
/* Register platform device */
ret = platform_device_register(wl->plat_dev);
if (ret) {
wl1271_error("couldn't register platform device");
- goto err_aggr;
+ goto err_dummy_packet;
}
dev_set_drvdata(&wl->plat_dev->dev, wl);
@@ -2798,6 +3802,9 @@ err_bt_coex_state:
err_platform:
platform_device_unregister(wl->plat_dev);
+err_dummy_packet:
+ dev_kfree_skb(wl->dummy_packet);
+
err_aggr:
free_pages((unsigned long)wl->aggr_buf, order);
@@ -2817,6 +3824,7 @@ EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
int wl1271_free_hw(struct wl1271 *wl)
{
platform_device_unregister(wl->plat_dev);
+ dev_kfree_skb(wl->dummy_packet);
free_pages((unsigned long)wl->aggr_buf,
get_order(WL1271_AGGR_BUFFER_SIZE));
kfree(wl->plat_dev);
@@ -2837,11 +3845,11 @@ int wl1271_free_hw(struct wl1271 *wl)
}
EXPORT_SYMBOL_GPL(wl1271_free_hw);
-u32 wl12xx_debug_level;
+u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
-module_param_named(debug_level, wl12xx_debug_level, uint, DEBUG_NONE);
+module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index 60a3738eadb..874468307fb 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -24,6 +24,7 @@
#include "reg.h"
#include "ps.h"
#include "io.h"
+#include "tx.h"
#define WL1271_WAKEUP_TIMEOUT 500
@@ -42,6 +43,10 @@ void wl1271_elp_work(struct work_struct *work)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
+ /* our work might already have been cancelled */
+ if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
+ goto out;
+
if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
(!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
!test_bit(WL1271_FLAG_IDLE, &wl->flags)))
@@ -60,15 +65,19 @@ out:
/* Routines to toggle sleep mode while in ELP */
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
- if (test_bit(WL1271_FLAG_PSM, &wl->flags) ||
- test_bit(WL1271_FLAG_IDLE, &wl->flags)) {
- cancel_delayed_work(&wl->elp_work);
- ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
- msecs_to_jiffies(ELP_ENTRY_DELAY));
- }
+ /* we shouldn't get consecutive sleep requests */
+ if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
+ return;
+
+ if (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
+ !test_bit(WL1271_FLAG_IDLE, &wl->flags))
+ return;
+
+ ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
+ msecs_to_jiffies(ELP_ENTRY_DELAY));
}
-int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
+int wl1271_ps_elp_wakeup(struct wl1271 *wl)
{
DECLARE_COMPLETION_ONSTACK(compl);
unsigned long flags;
@@ -76,6 +85,16 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
u32 start_time = jiffies;
bool pending = false;
+ /*
+ * we might try to wake up even if we didn't go to sleep
+ * before (e.g. on boot)
+ */
+ if (!test_and_clear_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))
+ return 0;
+
+ /* don't cancel_sync as it might contend for a mutex and deadlock */
+ cancel_delayed_work(&wl->elp_work);
+
if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
return 0;
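Illustrative sketch (not part of the patch): the hunk above pairs every wakeup with a preceding sleep request via the WL1271_FLAG_ELP_REQUESTED bit - sleep sets it (warning on a double request) and wakeup clears it, turning an unpaired wakeup into a no-op. The sketch below models the same handshake with a C11 atomic standing in for test_and_set_bit()/test_and_clear_bit().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool elp_requested;

static void elp_sleep(void)
{
	/* consecutive sleep requests would indicate a driver bug */
	if (atomic_exchange(&elp_requested, true))
		fprintf(stderr, "WARN: sleep already requested\n");
}

static int elp_wakeup(void)
{
	/* waking up without a prior sleep request is fine (e.g. on boot) */
	if (!atomic_exchange(&elp_requested, false))
		return 0;
	printf("waking chip\n");	/* real chip wakeup would go here */
	return 0;
}

int main(void)
{
	elp_wakeup();	/* no-op: nothing was requested yet */
	elp_sleep();
	elp_wakeup();	/* performs the wakeup */
	return 0;
}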
@@ -86,7 +105,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
* the completion variable in one entity.
*/
spin_lock_irqsave(&wl->wl_lock, flags);
- if (work_pending(&wl->irq_work) || chip_awake)
+ if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
pending = true;
else
wl->elp_compl = &compl;
@@ -139,8 +158,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
return ret;
}
- ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE,
- rates, send);
+ ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
if (ret < 0)
return ret;
@@ -149,9 +167,6 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
case STATION_ACTIVE_MODE:
default:
wl1271_debug(DEBUG_PSM, "leaving psm");
- ret = wl1271_ps_elp_wakeup(wl, false);
- if (ret < 0)
- return ret;
/* disable beacon early termination */
ret = wl1271_acx_bet_enable(wl, false);
@@ -163,8 +178,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
if (ret < 0)
return ret;
- ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE,
- rates, send);
+ ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
if (ret < 0)
return ret;
@@ -175,4 +189,85 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
return ret;
}
+static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
+{
+ int i, filtered = 0;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
+ unsigned long flags;
+ /* filter all frames currently in the low-level queues for this hlid */
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+ info = IEEE80211_SKB_CB(skb);
+ info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ info->status.rates[0].idx = -1;
+ ieee80211_tx_status(wl->hw, skb);
+ filtered++;
+ }
+ }
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->tx_queue_count -= filtered;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ wl1271_handle_tx_low_watermark(wl);
+}
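Illustrative sketch (not part of the patch): wl1271_ps_filter_frames() above drains every frame queued for a link, reports each one back as filtered, and then subtracts the drained count from the shared queue counter in one locked update. The standalone version below keeps the same drain-then-account shape with a toy singly linked queue in place of the skb queues.

#include <stdio.h>

#define NUM_QUEUES 4

struct frame { struct frame *next; };

static struct frame *queues[NUM_QUEUES];	/* per-link TX queues (stand-in) */
static int tx_queue_count;			/* shared count of queued frames */

static void report_filtered(struct frame *f)
{
	(void)f;	/* the driver hands the skb back to mac80211 here */
}

static void filter_link_frames(void)
{
	int filtered = 0;

	for (int i = 0; i < NUM_QUEUES; i++) {
		while (queues[i]) {
			struct frame *f = queues[i];

			queues[i] = f->next;
			report_filtered(f);
			filtered++;
		}
	}
	/* one adjustment of the shared counter, as done under the spinlock */
	tx_queue_count -= filtered;
	printf("filtered %d frames, %d still queued\n", filtered, tx_queue_count);
}

int main(void)
{
	static struct frame a, b;

	a.next = &b;
	queues[0] = &a;
	tx_queue_count = 5;
	filter_link_frames();	/* filtered 2 frames, 3 still queued */
	return 0;
}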
+
+void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
+{
+ struct ieee80211_sta *sta;
+
+ if (test_bit(hlid, &wl->ap_ps_map))
+ return;
+
+ wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d blks %d "
+ "clean_queues %d", hlid, wl->links[hlid].allocated_blks,
+ clean_queues);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+ if (!sta) {
+ wl1271_error("could not find sta %pM for starting ps",
+ wl->links[hlid].addr);
+ rcu_read_unlock();
+ return;
+ }
+
+#if 0
+ ieee80211_sta_ps_transition_ni(sta, true);
+#endif
+ rcu_read_unlock();
+
+ /* do we want to filter all frames from this link's queues? */
+ if (clean_queues)
+ wl1271_ps_filter_frames(wl, hlid);
+
+ __set_bit(hlid, &wl->ap_ps_map);
+}
+
+void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
+{
+ struct ieee80211_sta *sta;
+
+ if (!test_bit(hlid, &wl->ap_ps_map))
+ return;
+
+ wl1271_debug(DEBUG_PSM, "end mac80211 PSM on hlid %d", hlid);
+
+ __clear_bit(hlid, &wl->ap_ps_map);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+ if (!sta) {
+ wl1271_error("could not find sta %pM for ending ps",
+ wl->links[hlid].addr);
+ goto end;
+ }
+
+#if 0
+ ieee80211_sta_ps_transition_ni(sta, false);
+#endif
+end:
+ rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h
index 8415060f08e..c41bd0a711b 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -30,7 +30,9 @@
int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
u32 rates, bool send);
void wl1271_ps_elp_sleep(struct wl1271 *wl);
-int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
+int wl1271_ps_elp_wakeup(struct wl1271 *wl);
void wl1271_elp_work(struct work_struct *work);
+void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
+void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
#endif /* __WL1271_PS_H__ */
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/wl12xx/reg.h
index 99096077152..440a4ee9cb4 100644
--- a/drivers/net/wireless/wl12xx/reg.h
+++ b/drivers/net/wireless/wl12xx/reg.h
@@ -207,6 +207,8 @@
#define CHIP_ID_1271_PG10 (0x4030101)
#define CHIP_ID_1271_PG20 (0x4030111)
+#define CHIP_ID_1283_PG10 (0x05030101)
+#define CHIP_ID_1283_PG20 (0x05030111)
#define ENABLE (REGISTERS_BASE + 0x5450)
@@ -452,24 +454,11 @@
#define HI_CFG_UART_TX_OUT_GPIO_14 0x00000200
#define HI_CFG_UART_TX_OUT_GPIO_7 0x00000400
-/*
- * NOTE: USE_ACTIVE_HIGH compilation flag should be defined in makefile
- * for platforms using active high interrupt level
- */
-#ifdef USE_ACTIVE_HIGH
#define HI_CFG_DEF_VAL \
(HI_CFG_UART_ENABLE | \
HI_CFG_RST232_ENABLE | \
HI_CFG_CLOCK_REQ_SELECT | \
HI_CFG_HOST_INT_ENABLE)
-#else
-#define HI_CFG_DEF_VAL \
- (HI_CFG_UART_ENABLE | \
- HI_CFG_RST232_ENABLE | \
- HI_CFG_CLOCK_REQ_SELECT | \
- HI_CFG_HOST_INT_ENABLE)
-
-#endif
#define REF_FREQ_19_2 0
#define REF_FREQ_26_0 1
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index 682304c30b8..5aeed395e8a 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -29,14 +29,14 @@
#include "rx.h"
#include "io.h"
-static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
+static u8 wl1271_rx_get_mem_block(struct wl1271_fw_common_status *status,
u32 drv_rx_counter)
{
return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
RX_MEM_BLOCK_MASK;
}
-static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
+static u32 wl1271_rx_get_buf_size(struct wl1271_fw_common_status *status,
u32 drv_rx_counter)
{
return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
@@ -48,18 +48,14 @@ static void wl1271_rx_status(struct wl1271 *wl,
struct ieee80211_rx_status *status,
u8 beacon)
{
- enum ieee80211_band desc_band;
-
memset(status, 0, sizeof(struct ieee80211_rx_status));
- status->band = wl->band;
-
if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
- desc_band = IEEE80211_BAND_2GHZ;
+ status->band = IEEE80211_BAND_2GHZ;
else
- desc_band = IEEE80211_BAND_5GHZ;
+ status->band = IEEE80211_BAND_5GHZ;
- status->rate_idx = wl1271_rate_to_idx(desc->rate, desc_band);
+ status->rate_idx = wl1271_rate_to_idx(desc->rate, status->band);
#ifdef CONFIG_WL12XX_HT
/* 11n support */
@@ -76,15 +72,22 @@ static void wl1271_rx_status(struct wl1271 *wl,
*/
wl->noise = desc->rssi - (desc->snr >> 1);
- status->freq = ieee80211_channel_to_frequency(desc->channel);
+ status->freq = ieee80211_channel_to_frequency(desc->channel
+#if 0
+ , status->band
+#endif
+ );
if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
- status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
+ u8 desc_err_code = desc->status & WL1271_RX_DESC_STATUS_MASK;
+
+ status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_DECRYPTED;
- if (likely(!(desc->status & WL1271_RX_DESC_DECRYPT_FAIL)))
- status->flag |= RX_FLAG_DECRYPTED;
- if (unlikely(desc->status & WL1271_RX_DESC_MIC_FAIL))
+ if (unlikely(desc_err_code == WL1271_RX_DESC_MIC_FAIL)) {
status->flag |= RX_FLAG_MMIC_ERROR;
+ wl1271_warning("Michael MIC error");
+ }
}
}
@@ -92,7 +95,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
{
struct wl1271_rx_descriptor *desc;
struct sk_buff *skb;
- u16 *fc;
+ struct ieee80211_hdr *hdr;
u8 *buf;
u8 beacon = 0;
@@ -103,6 +106,25 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
if (unlikely(wl->state == WL1271_STATE_PLT))
return -EINVAL;
+ /* the data read starts with the descriptor */
+ desc = (struct wl1271_rx_descriptor *) data;
+
+ switch (desc->status & WL1271_RX_DESC_STATUS_MASK) {
+ /* discard corrupted packets */
+ case WL1271_RX_DESC_DRIVER_RX_Q_FAIL:
+ case WL1271_RX_DESC_DECRYPT_FAIL:
+ wl1271_warning("corrupted packet in RX with status: 0x%x",
+ desc->status & WL1271_RX_DESC_STATUS_MASK);
+ return -EINVAL;
+ case WL1271_RX_DESC_SUCCESS:
+ case WL1271_RX_DESC_MIC_FAIL:
+ break;
+ default:
+ wl1271_error("invalid RX descriptor status: 0x%x",
+ desc->status & WL1271_RX_DESC_STATUS_MASK);
+ return -EINVAL;
+ }
+
skb = __dev_alloc_skb(length, GFP_KERNEL);
if (!skb) {
wl1271_error("Couldn't allocate RX frame");
@@ -112,29 +134,28 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
buf = skb_put(skb, length);
memcpy(buf, data, length);
- /* the data read starts with the descriptor */
- desc = (struct wl1271_rx_descriptor *) buf;
-
/* now we pull the descriptor out of the buffer */
skb_pull(skb, sizeof(*desc));
- fc = (u16 *)skb->data;
- if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (ieee80211_is_beacon(hdr->frame_control))
beacon = 1;
wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
- wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len,
+ wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb,
+ skb->len - desc->pad_len,
beacon ? "beacon" : "");
skb_trim(skb, skb->len - desc->pad_len);
- ieee80211_rx_ni(wl->hw, skb);
+ skb_queue_tail(&wl->deferred_rx_queue, skb);
+ ieee80211_queue_work(wl->hw, &wl->netstack_work);
return 0;
}
-void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
+void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
{
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
u32 buf_size;
@@ -162,18 +183,25 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
break;
}
- /*
- * Choose the block we want to read
- * For aggregated packets, only the first memory block should
- * be retrieved. The FW takes care of the rest.
- */
- mem_block = wl1271_rx_get_mem_block(status, drv_rx_counter);
- wl->rx_mem_pool_addr.addr = (mem_block << 8) +
- le32_to_cpu(wl_mem_map->packet_memory_pool_start);
- wl->rx_mem_pool_addr.addr_extra =
- wl->rx_mem_pool_addr.addr + 4;
- wl1271_write(wl, WL1271_SLV_REG_DATA, &wl->rx_mem_pool_addr,
- sizeof(wl->rx_mem_pool_addr), false);
+ if (wl->chip.id != CHIP_ID_1283_PG20) {
+ /*
+ * Choose the block we want to read
+ * For aggregated packets, only the first memory block
+ * should be retrieved. The FW takes care of the rest.
+ */
+ mem_block = wl1271_rx_get_mem_block(status,
+ drv_rx_counter);
+
+ wl->rx_mem_pool_addr.addr = (mem_block << 8) +
+ le32_to_cpu(wl_mem_map->packet_memory_pool_start);
+
+ wl->rx_mem_pool_addr.addr_extra =
+ wl->rx_mem_pool_addr.addr + 4;
+
+ wl1271_write(wl, WL1271_SLV_REG_DATA,
+ &wl->rx_mem_pool_addr,
+ sizeof(wl->rx_mem_pool_addr), false);
+ }
/* Read all available packets at once */
wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
@@ -198,6 +226,22 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
pkt_offset += pkt_length;
}
}
- wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS,
- cpu_to_le32(wl->rx_counter));
+
+ /*
+ * Write the driver's packet counter to the FW. This is only required
+ * for older hardware revisions.
+ */
+ if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
+ wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
+}
+
+void wl1271_set_default_filters(struct wl1271 *wl)
+{
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ wl->rx_config = WL1271_DEFAULT_AP_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_AP_RX_FILTER;
+ } else {
+ wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
+ }
}
diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/wl12xx/rx.h
index 3abb26fe036..75fabf83649 100644
--- a/drivers/net/wireless/wl12xx/rx.h
+++ b/drivers/net/wireless/wl12xx/rx.h
@@ -30,10 +30,6 @@
#define WL1271_RX_MAX_RSSI -30
#define WL1271_RX_MIN_RSSI -95
-#define WL1271_RX_ALIGN_TO 4
-#define WL1271_RX_ALIGN(len) (((len) + WL1271_RX_ALIGN_TO - 1) & \
- ~(WL1271_RX_ALIGN_TO - 1))
-
#define SHORT_PREAMBLE_BIT BIT(0)
#define OFDM_RATE_BIT BIT(6)
#define PBCC_RATE_BIT BIT(7)
@@ -86,8 +82,9 @@
/*
* RX Descriptor status
*
- * Bits 0-2 - status
- * Bits 3-7 - reserved
+ * Bits 0-2 - error code
+ * Bits 3-5 - process_id tag (AP mode FW)
+ * Bits 6-7 - reserved
*/
#define WL1271_RX_DESC_STATUS_MASK 0x07
@@ -110,12 +107,16 @@ struct wl1271_rx_descriptor {
u8 snr;
__le32 timestamp;
u8 packet_class;
- u8 process_id;
+ union {
+ u8 process_id; /* STA FW */
+ u8 hlid; /* AP FW */
+ } __packed;
u8 pad_len;
u8 reserved;
} __packed;
-void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
+void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+void wl1271_set_default_filters(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index 6f897b9d90c..5d0544c8f3f 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -27,6 +27,7 @@
#include "cmd.h"
#include "scan.h"
#include "acx.h"
+#include "ps.h"
void wl1271_scan_complete_work(struct work_struct *work)
{
@@ -40,25 +41,31 @@ void wl1271_scan_complete_work(struct work_struct *work)
mutex_lock(&wl->mutex);
- if (wl->scan.state == WL1271_SCAN_STATE_IDLE) {
- mutex_unlock(&wl->mutex);
- return;
- }
+ if (wl->state == WL1271_STATE_OFF)
+ goto out;
+
+ if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
+ goto out;
wl->scan.state = WL1271_SCAN_STATE_IDLE;
- kfree(wl->scan.scanned_ch);
- wl->scan.scanned_ch = NULL;
+ memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, false);
/* restore hardware connection monitoring template */
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
- wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+ if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ if (wl1271_ps_elp_wakeup(wl) == 0) {
+ wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+ wl1271_ps_elp_sleep(wl);
+ }
+ }
if (wl->scan.failed) {
wl1271_info("Scan completed due to error.");
ieee80211_queue_work(wl->hw, &wl->recovery_work);
}
+
+out:
mutex_unlock(&wl->mutex);
}
@@ -79,7 +86,7 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
flags = req->channels[i]->flags;
- if (!wl->scan.scanned_ch[i] &&
+ if (!test_bit(i, wl->scan.scanned_ch) &&
!(flags & IEEE80211_CHAN_DISABLED) &&
((!!(flags & IEEE80211_CHAN_PASSIVE_SCAN)) == passive) &&
(req->channels[i]->band == band)) {
@@ -116,7 +123,7 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
memset(&channels[j].bssid_msb, 0xff, 2);
/* Mark the channels we already used */
- wl->scan.scanned_ch[i] = true;
+ set_bit(i, wl->scan.scanned_ch);
j++;
}
@@ -283,6 +290,12 @@ void wl1271_scan_stm(struct wl1271 *wl)
int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req)
{
+ /*
+ * cfg80211 should guarantee that we don't get more channels
+ * than what we have registered.
+ */
+ BUG_ON(req->n_channels > WL1271_MAX_CHANNELS);
+
if (wl->scan.state != WL1271_SCAN_STATE_IDLE)
return -EBUSY;
@@ -296,10 +309,8 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
}
wl->scan.req = req;
+ memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
- wl->scan.scanned_ch = kcalloc(req->n_channels,
- sizeof(*wl->scan.scanned_ch),
- GFP_KERNEL);
/* we assume failure so that timeout scenarios are handled correctly */
wl->scan.failed = true;
ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 93cbb8d5aba..003ab03842b 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -28,6 +28,7 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/gpio.h>
#include <linux/wl12xx.h>
#include <linux/pm_runtime.h>
@@ -50,6 +51,13 @@ static const struct sdio_device_id wl1271_devices[] = {
};
MODULE_DEVICE_TABLE(sdio, wl1271_devices);
+static void wl1271_sdio_set_block_size(struct wl1271 *wl, unsigned int blksz)
+{
+ sdio_claim_host(wl->if_priv);
+ sdio_set_block_size(wl->if_priv, blksz);
+ sdio_release_host(wl->if_priv);
+}
+
static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
{
return wl->if_priv;
@@ -60,7 +68,7 @@ static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
return &(wl_to_func(wl)->dev);
}
-static irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_hardirq(int irq, void *cookie)
{
struct wl1271 *wl = cookie;
unsigned long flags;
@@ -69,17 +77,14 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
/* complete the ELP completion */
spin_lock_irqsave(&wl->wl_lock, flags);
+ set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
if (wl->elp_compl) {
complete(wl->elp_compl);
wl->elp_compl = NULL;
}
-
- if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
- ieee80211_queue_work(wl->hw, &wl->irq_work);
- set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
@@ -106,8 +111,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
int ret;
struct sdio_func *func = wl_to_func(wl);
- sdio_claim_host(func);
-
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
@@ -123,8 +126,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
}
- sdio_release_host(func);
-
if (ret)
wl1271_error("sdio read failed (%d)", ret);
}
@@ -135,8 +136,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
int ret;
struct sdio_func *func = wl_to_func(wl);
- sdio_claim_host(func);
-
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
@@ -152,8 +151,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
ret = sdio_memcpy_toio(func, addr, buf, len);
}
- sdio_release_host(func);
-
if (ret)
wl1271_error("sdio write failed (%d)", ret);
}
@@ -168,9 +165,13 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
if (ret < 0)
goto out;
+ /* Runtime PM might be disabled, so power up the card manually */
+ ret = mmc_power_restore_host(func->card->host);
+ if (ret < 0)
+ goto out;
+
sdio_claim_host(func);
sdio_enable_func(func);
- sdio_release_host(func);
out:
return ret;
@@ -179,12 +180,17 @@ out:
static int wl1271_sdio_power_off(struct wl1271 *wl)
{
struct sdio_func *func = wl_to_func(wl);
+ int ret;
- sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
- /* Power down the card */
+ /* Runtime PM might be disabled, so power off the card manually */
+ ret = mmc_power_save_host(func->card->host);
+ if (ret < 0)
+ return ret;
+
+ /* Let runtime PM know the card is powered off */
return pm_runtime_put_sync(&func->dev);
}
@@ -204,7 +210,8 @@ static struct wl1271_if_operations sdio_ops = {
.power = wl1271_sdio_set_power,
.dev = wl1271_sdio_wl_to_dev,
.enable_irq = wl1271_sdio_enable_interrupts,
- .disable_irq = wl1271_sdio_disable_interrupts
+ .disable_irq = wl1271_sdio_disable_interrupts,
+ .set_block_size = wl1271_sdio_set_block_size,
};
static int __devinit wl1271_probe(struct sdio_func *func,
@@ -213,6 +220,7 @@ static int __devinit wl1271_probe(struct sdio_func *func,
struct ieee80211_hw *hw;
const struct wl12xx_platform_data *wlan_data;
struct wl1271 *wl;
+ unsigned long irqflags;
int ret;
/* We are only able to handle the wlan function */
@@ -231,6 +239,9 @@ static int __devinit wl1271_probe(struct sdio_func *func,
/* Grab access to FN0 for ELP reg. */
func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+ /* Use block mode for transferring over one block size of data */
+ func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
+
wlan_data = wl12xx_get_platform_data();
if (IS_ERR(wlan_data)) {
ret = PTR_ERR(wlan_data);
@@ -240,15 +251,22 @@ static int __devinit wl1271_probe(struct sdio_func *func,
wl->irq = wlan_data->irq;
wl->ref_clock = wlan_data->board_ref_clock;
+ wl->tcxo_clock = wlan_data->board_tcxo_clock;
+ wl->platform_quirks = wlan_data->platform_quirks;
+
+ if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+ irqflags = IRQF_TRIGGER_RISING;
+ else
+ irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
- ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
+ irqflags,
+ DRIVER_NAME, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
goto out_free;
}
- set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-
disable_irq(wl->irq);
ret = wl1271_init_ieee80211(wl);
@@ -271,7 +289,6 @@ static int __devinit wl1271_probe(struct sdio_func *func,
out_irq:
free_irq(wl->irq, wl);
-
out_free:
wl1271_free_hw(wl);
@@ -302,9 +319,23 @@ static int wl1271_resume(struct device *dev)
return 0;
}
+/*
+ * SDIO bus runtime idle gets precedence over this, but that just calls
+ * the generic version. The generic version will call our version if
+ * it exists, so we still get called. We need to allow it to power us
+ * off.
+ */
+static int wl1271_runtime_idle(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+
+ return mmc_power_save_host(func->card->host);
+}
+
static const struct dev_pm_ops wl1271_sdio_pm_ops = {
.suspend = wl1271_suspend,
.resume = wl1271_resume,
+ .runtime_idle = wl1271_runtime_idle,
};
static struct sdio_driver wl1271_sdio_driver = {
@@ -342,6 +373,9 @@ module_init(wl1271_init);
module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL1271_FW_NAME);
+MODULE_FIRMWARE(WL128X_FW_NAME);
+MODULE_FIRMWARE(WL127X_AP_FW_NAME);
+MODULE_FIRMWARE(WL128X_AP_FW_NAME);
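
Both bus front-ends in this patch move from request_irq() plus a workqueue to a threaded interrupt: the hard handler runs in atomic context, only completes the ELP wakeup and returns IRQ_WAKE_THREAD, while the heavy lifting runs in the IRQ thread, where sleeping SDIO/SPI transfers are allowed. A bare-bones sketch of that split (handler names are placeholders, not the driver's):

#include <linux/interrupt.h>

static irqreturn_t demo_hardirq(int irq, void *cookie)
{
	/* Atomic context: only flag work and wake waiters here. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_fn(int irq, void *cookie)
{
	/* Process context: sleeping bus transfers are allowed here. */
	return IRQ_HANDLED;
}

static int demo_setup_irq(unsigned int irq, void *cookie)
{
	/* IRQF_ONESHOT keeps the line masked until the thread finishes,
	 * which is required for level-triggered interrupts. */
	return request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
				    IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				    "demo", cookie);
}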
diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c
index 9fcbd3dd849..f2891539287 100644
--- a/drivers/net/wireless/wl12xx/sdio_test.c
+++ b/drivers/net/wireless/wl12xx/sdio_test.c
@@ -189,7 +189,12 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl));
+ if (wl->chip.id == CHIP_ID_1283_PG20)
+ ret = request_firmware(&fw, WL128X_FW_NAME,
+ wl1271_wl_to_dev(wl));
+ else
+ ret = request_firmware(&fw, WL1271_FW_NAME,
+ wl1271_wl_to_dev(wl));
if (ret < 0) {
wl1271_error("could not get firmware: %d", ret);
@@ -227,14 +232,14 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL1271_NVS_NAME, wl1271_wl_to_dev(wl));
+ ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
if (ret < 0) {
wl1271_error("could not get nvs file: %d", ret);
return ret;
}
- wl->nvs = kmemdup(fw->data, sizeof(struct wl1271_nvs_file), GFP_KERNEL);
+ wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!wl->nvs) {
wl1271_error("could not allocate memory for the nvs file");
@@ -288,6 +293,11 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
wl1271_notice("chip id 0x%x (1271 PG20)",
wl->chip.id);
break;
+ case CHIP_ID_1283_PG20:
+ wl1271_notice("chip id 0x%x (1283 PG20)",
+ wl->chip.id);
+ break;
+ case CHIP_ID_1283_PG10:
default:
wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
return -ENODEV;
@@ -407,6 +417,9 @@ static int __devinit wl1271_probe(struct sdio_func *func,
/* Grab access to FN0 for ELP reg. */
func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+ /* Use block mode for transferring over one block size of data */
+ func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
+
wlan_data = wl12xx_get_platform_data();
if (IS_ERR(wlan_data)) {
ret = PTR_ERR(wlan_data);
@@ -416,6 +429,7 @@ static int __devinit wl1271_probe(struct sdio_func *func,
wl->irq = wlan_data->irq;
wl->ref_clock = wlan_data->board_ref_clock;
+ wl->tcxo_clock = wlan_data->board_tcxo_clock;
sdio_set_drvdata(func, wl_test);
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 7145ea54378..51662bb6801 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -110,6 +110,7 @@ static void wl1271_spi_reset(struct wl1271 *wl)
spi_message_add_tail(&t, &m);
spi_sync(wl_to_spi(wl), &m);
+
wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
kfree(cmd);
}
@@ -319,28 +320,23 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
spi_sync(wl_to_spi(wl), &m);
}
-static irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_hardirq(int irq, void *cookie)
{
- struct wl1271 *wl;
+ struct wl1271 *wl = cookie;
unsigned long flags;
wl1271_debug(DEBUG_IRQ, "IRQ");
- wl = cookie;
-
/* complete the ELP completion */
spin_lock_irqsave(&wl->wl_lock, flags);
+ set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
if (wl->elp_compl) {
complete(wl->elp_compl);
wl->elp_compl = NULL;
}
-
- if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
- ieee80211_queue_work(wl->hw, &wl->irq_work);
- set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
@@ -359,7 +355,8 @@ static struct wl1271_if_operations spi_ops = {
.power = wl1271_spi_set_power,
.dev = wl1271_spi_wl_to_dev,
.enable_irq = wl1271_spi_enable_interrupts,
- .disable_irq = wl1271_spi_disable_interrupts
+ .disable_irq = wl1271_spi_disable_interrupts,
+ .set_block_size = NULL,
};
static int __devinit wl1271_probe(struct spi_device *spi)
@@ -367,6 +364,7 @@ static int __devinit wl1271_probe(struct spi_device *spi)
struct wl12xx_platform_data *pdata;
struct ieee80211_hw *hw;
struct wl1271 *wl;
+ unsigned long irqflags;
int ret;
pdata = spi->dev.platform_data;
@@ -404,6 +402,13 @@ static int __devinit wl1271_probe(struct spi_device *spi)
}
wl->ref_clock = pdata->board_ref_clock;
+ wl->tcxo_clock = pdata->board_tcxo_clock;
+ wl->platform_quirks = pdata->platform_quirks;
+
+ if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+ irqflags = IRQF_TRIGGER_RISING;
+ else
+ irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
wl->irq = spi->irq;
if (wl->irq < 0) {
@@ -412,14 +417,14 @@ static int __devinit wl1271_probe(struct spi_device *spi)
goto out_free;
}
- ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
+ irqflags,
+ DRIVER_NAME, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
goto out_free;
}
- set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-
disable_irq(wl->irq);
ret = wl1271_init_ieee80211(wl);
@@ -491,7 +496,10 @@ module_init(wl1271_init);
module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL1271_FW_NAME);
+MODULE_FIRMWARE(WL128X_FW_NAME);
+MODULE_FIRMWARE(WL127X_AP_FW_NAME);
+MODULE_FIRMWARE(WL128X_AP_FW_NAME);
MODULE_ALIAS("spi:wl1271");
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index 6ec06a4a4c6..da351d7cd1f 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -27,6 +27,7 @@
#include "wl12xx.h"
#include "acx.h"
+#include "reg.h"
#define WL1271_TM_MAX_DATA_LENGTH 1024
@@ -204,7 +205,10 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
kfree(wl->nvs);
- if (len != sizeof(struct wl1271_nvs_file))
+ if ((wl->chip.id == CHIP_ID_1283_PG20) &&
+ (len != sizeof(struct wl128x_nvs_file)))
+ return -EINVAL;
+ else if (len != sizeof(struct wl1271_nvs_file))
return -EINVAL;
wl->nvs = kzalloc(len, GFP_KERNEL);
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index b44c75cd8c1..cc837bba546 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/etherdevice.h>
#include "wl12xx.h"
#include "io.h"
@@ -30,6 +31,23 @@
#include "ps.h"
#include "tx.h"
+static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
+{
+ int ret;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
+ if (is_ap)
+ ret = wl1271_cmd_set_ap_default_wep_key(wl, id);
+ else
+ ret = wl1271_cmd_set_sta_default_wep_key(wl, id);
+
+ if (ret < 0)
+ return ret;
+
+ wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
+ return 0;
+}
+
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
int id;
@@ -47,18 +65,116 @@ static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
static void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
if (__test_and_clear_bit(id, wl->tx_frames_map)) {
+ if (unlikely(wl->tx_frames_cnt == ACX_TX_DESCRIPTORS))
+ clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
+
wl->tx_frames[id] = NULL;
wl->tx_frames_cnt--;
}
}
+static int wl1271_tx_update_filters(struct wl1271 *wl,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data +
+ sizeof(struct wl1271_tx_hw_descr));
+
+ /*
+ * Stop BSSID-based filtering before transmitting authentication
+ * requests. This way the HW will never drop authentication
+ * responses coming from BSSIDs it isn't familiar with (e.g. on
+ * roaming).
+ */
+ if (!ieee80211_is_auth(hdr->frame_control))
+ return 0;
+
+ wl1271_configure_filters(wl, FIF_OTHER_BSS);
+
+ return wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
+}
+
+static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ /*
+ * Add the station to the known list before transmitting the
+ * authentication response. This way it won't get de-authed by the FW
+ * when transmitting too soon.
+ */
+ hdr = (struct ieee80211_hdr *)(skb->data +
+ sizeof(struct wl1271_tx_hw_descr));
+ if (ieee80211_is_auth(hdr->frame_control))
+ wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+}
+
+static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
+{
+ bool fw_ps;
+ u8 tx_blks;
+
+ /* only regulate station links */
+ if (hlid < WL1271_AP_STA_HLID_START)
+ return;
+
+ fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+ tx_blks = wl->links[hlid].allocated_blks;
+
+ /*
+ * if in FW PS and there is enough data in FW we can put the link
+ * into high-level PS and clean out its TX queues.
+ */
+ if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
+ wl1271_ps_link_start(wl, hlid, true);
+}
+
+u8 wl1271_tx_get_hlid(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
+
+ if (control->control.sta) {
+ struct wl1271_station *wl_sta;
+
+ wl_sta = (struct wl1271_station *)
+ control->control.sta->drv_priv;
+ return wl_sta->hlid;
+ } else {
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ return WL1271_AP_GLOBAL_HLID;
+ else
+ return WL1271_AP_BROADCAST_HLID;
+ }
+}
+
+static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
+ unsigned int packet_length)
+{
+ if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
+ return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
+ else
+ return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
+}
+
static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
- u32 buf_offset)
+ u32 buf_offset, u8 hlid)
{
struct wl1271_tx_hw_descr *desc;
u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
+ u32 len;
u32 total_blocks;
int id, ret = -EBUSY;
+ u32 spare_blocks;
+
+ if (unlikely(wl->quirks & WL12XX_QUIRK_USE_2_SPARE_BLOCKS))
+ spare_blocks = 2;
+ else
+ spare_blocks = 1;
if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
return -EAGAIN;
@@ -70,17 +186,30 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
/* approximate the number of blocks required for this packet
in the firmware */
- total_blocks = total_len + TX_HW_BLOCK_SIZE - 1;
- total_blocks = total_blocks / TX_HW_BLOCK_SIZE + TX_HW_BLOCK_SPARE;
+ len = wl12xx_calc_packet_alignment(wl, total_len);
+
+ total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
+ spare_blocks;
+
if (total_blocks <= wl->tx_blocks_available) {
desc = (struct wl1271_tx_hw_descr *)skb_push(
skb, total_len - skb->len);
- desc->extra_mem_blocks = TX_HW_BLOCK_SPARE;
- desc->total_mem_blocks = total_blocks;
+ /* HW descriptor fields change between wl127x and wl128x */
+ if (wl->chip.id == CHIP_ID_1283_PG20) {
+ desc->wl128x_mem.total_mem_blocks = total_blocks;
+ } else {
+ desc->wl127x_mem.extra_blocks = spare_blocks;
+ desc->wl127x_mem.total_mem_blocks = total_blocks;
+ }
+
desc->id = id;
wl->tx_blocks_available -= total_blocks;
+ wl->tx_allocated_blocks += total_blocks;
+
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ wl->links[hlid].allocated_blks += total_blocks;
ret = 0;
@@ -94,12 +223,18 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
return ret;
}
+static bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
+{
+ return wl->dummy_packet == skb;
+}
+
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
- u32 extra, struct ieee80211_tx_info *control)
+ u32 extra, struct ieee80211_tx_info *control,
+ u8 hlid)
{
struct timespec ts;
struct wl1271_tx_hw_descr *desc;
- int pad, ac;
+ int aligned_len, ac, rate_idx;
s64 hosttime;
u16 tx_attr;
@@ -117,33 +252,91 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
getnstimeofday(&ts);
hosttime = (timespec_to_ns(&ts) >> 10);
desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
- desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
- /* configure the tx attributes */
- tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
+ if (wl->bss_type != BSS_TYPE_AP_BSS)
+ desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
+ else
+ desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
- /* queue (we use same identifiers for tid's and ac's */
+ /* queue */
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
- desc->tid = ac;
- desc->aid = TX_HW_DEFAULT_AID;
+ desc->tid = skb->priority;
+
+ if (wl12xx_is_dummy_packet(wl, skb)) {
+ /*
+ * FW expects the dummy packet to have an invalid session id -
+ * any session id that is different than the one set in the join
+ */
+ tx_attr = ((~wl->session_counter) <<
+ TX_HW_ATTR_OFST_SESSION_COUNTER) &
+ TX_HW_ATTR_SESSION_COUNTER;
+
+ tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
+ } else {
+ /* configure the tx attributes */
+ tx_attr =
+ wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
+ }
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS) {
+ desc->aid = hlid;
+
+ /* if the packets are destined for AP (have a STA entry)
+ send them with AP rate policies, otherwise use default
+ basic rates */
+ if (control->control.sta)
+ rate_idx = ACX_TX_AP_FULL_RATE;
+ else
+ rate_idx = ACX_TX_BASIC_RATE;
+ } else {
+ desc->hlid = hlid;
+ switch (hlid) {
+ case WL1271_AP_GLOBAL_HLID:
+ rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
+ break;
+ case WL1271_AP_BROADCAST_HLID:
+ rate_idx = ACX_TX_AP_MODE_BCST_RATE;
+ break;
+ default:
+ rate_idx = ac;
+ break;
+ }
+ }
+
+ tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
desc->reserved = 0;
- /* align the length (and store in terms of words) */
- pad = WL1271_TX_ALIGN(skb->len);
- desc->length = cpu_to_le16(pad >> 2);
+ aligned_len = wl12xx_calc_packet_alignment(wl, skb->len);
- /* calculate number of padding bytes */
- pad = pad - skb->len;
- tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
+ if (wl->chip.id == CHIP_ID_1283_PG20) {
+ desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
+ desc->length = cpu_to_le16(aligned_len >> 2);
- /* if the packets are destined for AP (have a STA entry) send them
- with AP rate policies, otherwise use default basic rates */
- if (control->control.sta)
- tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
+ wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
+ "tx_attr: 0x%x len: %d life: %d mem: %d",
+ desc->hlid, tx_attr,
+ le16_to_cpu(desc->length),
+ le16_to_cpu(desc->life_time),
+ desc->wl128x_mem.total_mem_blocks);
+ } else {
+ int pad;
- desc->tx_attr = cpu_to_le16(tx_attr);
+ /* Store the aligned length in terms of words */
+ desc->length = cpu_to_le16(aligned_len >> 2);
- wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
+ /* calculate number of padding bytes */
+ pad = aligned_len - skb->len;
+ tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
+
+ wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
+ "tx_attr: 0x%x len: %d life: %d mem: %d", pad,
+ desc->hlid, tx_attr,
+ le16_to_cpu(desc->length),
+ le16_to_cpu(desc->life_time),
+ desc->wl127x_mem.total_mem_blocks);
+ }
+
+ desc->tx_attr = cpu_to_le16(tx_attr);
}
/* caller must hold wl->mutex */
@@ -153,8 +346,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
struct ieee80211_tx_info *info;
u32 extra = 0;
int ret = 0;
- u8 idx;
u32 total_len;
+ u8 hlid;
if (!skb)
return -EINVAL;
@@ -166,32 +359,56 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
extra = WL1271_TKIP_IV_SPACE;
if (info->control.hw_key) {
- idx = info->control.hw_key->hw_key_idx;
+ bool is_wep;
+ u8 idx = info->control.hw_key->hw_key_idx;
+ u32 cipher = info->control.hw_key->cipher;
+
+ is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
+ (cipher == WLAN_CIPHER_SUITE_WEP104);
- /* FIXME: do we have to do this if we're not using WEP? */
- if (unlikely(wl->default_key != idx)) {
- ret = wl1271_cmd_set_default_wep_key(wl, idx);
+ if (unlikely(is_wep && wl->default_key != idx)) {
+ ret = wl1271_set_default_wep_key(wl, idx);
if (ret < 0)
return ret;
wl->default_key = idx;
}
}
- ret = wl1271_tx_allocate(wl, skb, extra, buf_offset);
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ hlid = wl1271_tx_get_hlid(skb);
+ else
+ hlid = TX_HW_DEFAULT_AID;
+
+ ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
if (ret < 0)
return ret;
- wl1271_tx_fill_hdr(wl, skb, extra, info);
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ wl1271_tx_ap_update_inconnection_sta(wl, skb);
+ wl1271_tx_regulate_link(wl, hlid);
+ } else {
+ wl1271_tx_update_filters(wl, skb);
+ }
+
+ wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
/*
- * The length of each packet is stored in terms of words. Thus, we must
- * pad the skb data to make sure its length is aligned.
- * The number of padding bytes is computed and set in wl1271_tx_fill_hdr
+ * The length of each packet is stored in terms of
+ * words. Thus, we must pad the skb data to make sure its
+ * length is aligned. The number of padding bytes is computed
+ * and set in wl1271_tx_fill_hdr.
+ * In special cases, we want to align to a specific block size
+ * (eg. for wl128x with SDIO we align to 256).
*/
- total_len = WL1271_TX_ALIGN(skb->len);
+ total_len = wl12xx_calc_packet_alignment(wl, skb->len);
+
memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
+ /* Revert side effects in the dummy packet skb, so it can be reused */
+ if (wl12xx_is_dummy_packet(wl, skb))
+ skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
+
return total_len;
}
@@ -222,7 +439,7 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
return enabled_rates;
}
-static void handle_tx_low_watermark(struct wl1271 *wl)
+void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
unsigned long flags;
@@ -236,7 +453,7 @@ static void handle_tx_low_watermark(struct wl1271 *wl)
}
}
-static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
{
struct sk_buff *skb = NULL;
unsigned long flags;
@@ -262,12 +479,84 @@ out:
return skb;
}
+static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
+{
+ struct sk_buff *skb = NULL;
+ unsigned long flags;
+ int i, h, start_hlid;
+
+ /* start from the link after the last one */
+ start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
+
+ /* dequeue according to AC, round robin on each link */
+ for (i = 0; i < AP_MAX_LINKS; i++) {
+ h = (start_hlid + i) % AP_MAX_LINKS;
+
+ skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VO]);
+ if (skb)
+ goto out;
+ skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VI]);
+ if (skb)
+ goto out;
+ skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BE]);
+ if (skb)
+ goto out;
+ skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BK]);
+ if (skb)
+ goto out;
+ }
+
+out:
+ if (skb) {
+ wl->last_tx_hlid = h;
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->tx_queue_count--;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ } else {
+ wl->last_tx_hlid = 0;
+ }
+
+ return skb;
+}
+
+static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+{
+ unsigned long flags;
+ struct sk_buff *skb = NULL;
+
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ skb = wl1271_ap_skb_dequeue(wl);
+ else
+ skb = wl1271_sta_skb_dequeue(wl);
+
+ if (!skb &&
+ test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
+ skb = wl->dummy_packet;
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->tx_queue_count--;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ }
+
+ return skb;
+}
+
static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
{
unsigned long flags;
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
- skb_queue_head(&wl->tx_queue[q], skb);
+ if (wl12xx_is_dummy_packet(wl, skb)) {
+ set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
+ } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ u8 hlid = wl1271_tx_get_hlid(skb);
+ skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
+
+ /* make sure we dequeue the same packet next time */
+ wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
+ } else {
+ skb_queue_head(&wl->tx_queue[q], skb);
+ }
+
spin_lock_irqsave(&wl->wl_lock, flags);
wl->tx_queue_count++;
spin_unlock_irqrestore(&wl->wl_lock, flags);
@@ -276,44 +565,14 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
void wl1271_tx_work_locked(struct wl1271 *wl)
{
struct sk_buff *skb;
- bool woken_up = false;
- u32 sta_rates = 0;
u32 buf_offset = 0;
bool sent_packets = false;
int ret;
- /* check if the rates supported by the AP have changed */
- if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
- &wl->flags))) {
- unsigned long flags;
-
- spin_lock_irqsave(&wl->wl_lock, flags);
- sta_rates = wl->sta_rate_set;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- }
-
if (unlikely(wl->state == WL1271_STATE_OFF))
- goto out;
-
- /* if rates have changed, re-configure the rate policy */
- if (unlikely(sta_rates)) {
- ret = wl1271_ps_elp_wakeup(wl, false);
- if (ret < 0)
- goto out;
- woken_up = true;
-
- wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
- wl1271_acx_rate_policies(wl);
- }
+ return;
while ((skb = wl1271_skb_dequeue(wl))) {
- if (!woken_up) {
- ret = wl1271_ps_elp_wakeup(wl, false);
- if (ret < 0)
- goto out_ack;
- woken_up = true;
- }
-
ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
if (ret == -EAGAIN) {
/*
@@ -350,22 +609,32 @@ out_ack:
sent_packets = true;
}
if (sent_packets) {
- /* interrupt the firmware with the new packets */
- wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
- handle_tx_low_watermark(wl);
+ /*
+ * Interrupt the firmware with the new packets. This is only
+ * required for older hardware revisions
+ */
+ if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
+ wl1271_write32(wl, WL1271_HOST_WR_ACCESS,
+ wl->tx_packets_count);
+
+ wl1271_handle_tx_low_watermark(wl);
}
-
-out:
- if (woken_up)
- wl1271_ps_elp_sleep(wl);
}
void wl1271_tx_work(struct work_struct *work)
{
struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
+ int ret;
mutex_lock(&wl->mutex);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
wl1271_tx_work_locked(wl);
+
+ wl1271_ps_elp_sleep(wl);
+out:
mutex_unlock(&wl->mutex);
}
@@ -387,6 +656,11 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
skb = wl->tx_frames[id];
info = IEEE80211_SKB_CB(skb);
+ if (wl12xx_is_dummy_packet(wl, skb)) {
+ wl1271_free_tx_id(wl, id);
+ return;
+ }
+
/* update the TX status info */
if (result->status == TX_SUCCESS) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
@@ -427,7 +701,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
result->rate_class_index, result->status);
/* return the packet to the stack */
- ieee80211_tx_status(wl->hw, skb);
+ skb_queue_tail(&wl->deferred_tx_queue, skb);
+ ieee80211_queue_work(wl->hw, &wl->netstack_work);
wl1271_free_tx_id(wl, result->id);
}
@@ -469,34 +744,101 @@ void wl1271_tx_complete(struct wl1271 *wl)
}
}
+void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
+{
+ struct sk_buff *skb;
+ int i, total = 0;
+ unsigned long flags;
+ struct ieee80211_tx_info *info;
+
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+ wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
+ info = IEEE80211_SKB_CB(skb);
+ info->status.rates[0].idx = -1;
+ info->status.rates[0].count = 0;
+ ieee80211_tx_status(wl->hw, skb);
+ total++;
+ }
+ }
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->tx_queue_count -= total;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ wl1271_handle_tx_low_watermark(wl);
+}
+
/* caller must hold wl->mutex */
void wl1271_tx_reset(struct wl1271 *wl)
{
int i;
struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
/* TX failure */
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
- wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
- ieee80211_tx_status(wl->hw, skb);
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ for (i = 0; i < AP_MAX_LINKS; i++) {
+ wl1271_tx_reset_link_queues(wl, i);
+ wl->links[i].allocated_blks = 0;
+ wl->links[i].prev_freed_blks = 0;
+ }
+
+ wl->last_tx_hlid = 0;
+ } else {
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
+ wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
+ skb);
+
+ if (!wl12xx_is_dummy_packet(wl, skb)) {
+ info = IEEE80211_SKB_CB(skb);
+ info->status.rates[0].idx = -1;
+ info->status.rates[0].count = 0;
+ ieee80211_tx_status(wl->hw, skb);
+ }
+ }
}
}
+
wl->tx_queue_count = 0;
/*
* Make sure the driver is at a consistent state, in case this
* function is called from a context other than interface removal.
*/
- handle_tx_low_watermark(wl);
+ wl1271_handle_tx_low_watermark(wl);
+
+ for (i = 0; i < ACX_TX_DESCRIPTORS; i++) {
+ if (wl->tx_frames[i] == NULL)
+ continue;
+
+ skb = wl->tx_frames[i];
+ wl1271_free_tx_id(wl, i);
+ wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
+
+ if (!wl12xx_is_dummy_packet(wl, skb)) {
+ /*
+ * Remove private headers before passing the skb to
+ * mac80211
+ */
+ info = IEEE80211_SKB_CB(skb);
+ skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
+ if (info->control.hw_key &&
+ info->control.hw_key->cipher ==
+ WLAN_CIPHER_SUITE_TKIP) {
+ int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ memmove(skb->data + WL1271_TKIP_IV_SPACE,
+ skb->data, hdrlen);
+ skb_pull(skb, WL1271_TKIP_IV_SPACE);
+ }
+
+ info->status.rates[0].idx = -1;
+ info->status.rates[0].count = 0;
- for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
- if (wl->tx_frames[i] != NULL) {
- skb = wl->tx_frames[i];
- wl1271_free_tx_id(wl, i);
- wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
ieee80211_tx_status(wl->hw, skb);
}
+ }
}
#define WL1271_TX_FLUSH_TIMEOUT 500000
@@ -509,8 +851,8 @@ void wl1271_tx_flush(struct wl1271 *wl)
while (!time_after(jiffies, timeout)) {
mutex_lock(&wl->mutex);
- wl1271_debug(DEBUG_TX, "flushing tx buffer: %d",
- wl->tx_frames_cnt);
+ wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
+ wl->tx_frames_cnt, wl->tx_queue_count);
if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
mutex_unlock(&wl->mutex);
return;
@@ -521,3 +863,21 @@ void wl1271_tx_flush(struct wl1271 *wl)
wl1271_warning("Unable to flush all TX buffers, timed out.");
}
+
+u32 wl1271_tx_min_rate_get(struct wl1271 *wl)
+{
+ int i;
+ u32 rate = 0;
+
+ if (!wl->basic_rate_set) {
+ WARN_ON(1);
+ wl->basic_rate_set = wl->conf.tx.basic_rate;
+ }
+
+ for (i = 0; !rate; i++) {
+ if ((wl->basic_rate_set >> i) & 0x1)
+ rate = 1 << i;
+ }
+
+ return rate;
+}
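
wl1271_ap_skb_dequeue() above services the per-link AC queues in a fixed order: it starts one link past the last one served and, for each link, drains VO before VI, BE and BK. The following standalone C sketch (illustrative constants, not driver code) shows just that iteration order:

#include <stdio.h>

#define LINKS 7		/* AP_MAX_LINKS: 5 stations + global + broadcast */
#define ACS   4		/* VO, VI, BE, BK, highest priority first */

static int pick_next(int last_link, const int queued[LINKS][ACS],
		     int *link_out, int *ac_out)
{
	for (int i = 0; i < LINKS; i++) {
		int link = (last_link + 1 + i) % LINKS;	/* round robin */

		for (int ac = 0; ac < ACS; ac++) {	/* strict AC priority */
			if (queued[link][ac]) {
				*link_out = link;
				*ac_out = ac;
				return 1;
			}
		}
	}
	return 0;	/* all queues empty */
}

int main(void)
{
	int queued[LINKS][ACS] = { 0 };
	int link, ac;

	queued[3][2] = 1;	/* one BE frame queued on link 3 */
	if (pick_next(2, queued, &link, &ac))
		printf("serve link %d, AC %d\n", link, ac);	/* link 3, AC 2 */
	return 0;
}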
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index 903e5dc69b7..fc7835c4cf6 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -25,10 +25,10 @@
#ifndef __TX_H__
#define __TX_H__
-#define TX_HW_BLOCK_SPARE 2
#define TX_HW_BLOCK_SIZE 252
#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
+#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000
/* The chipset reference driver states, that the "aid" value 1
* is for infra-BSS, but is still always used */
#define TX_HW_DEFAULT_AID 1
@@ -40,6 +40,7 @@
BIT(8) | BIT(9))
#define TX_HW_ATTR_LAST_WORD_PAD (BIT(10) | BIT(11))
#define TX_HW_ATTR_TX_CMPLT_REQ BIT(12)
+#define TX_HW_ATTR_TX_DUMMY_REQ BIT(13)
#define TX_HW_ATTR_OFST_SAVE_RETRIES 0
#define TX_HW_ATTR_OFST_HEADER_PAD 1
@@ -52,24 +53,62 @@
#define TX_HW_RESULT_QUEUE_LEN_MASK 0xf
#define WL1271_TX_ALIGN_TO 4
-#define WL1271_TX_ALIGN(len) (((len) + WL1271_TX_ALIGN_TO - 1) & \
- ~(WL1271_TX_ALIGN_TO - 1))
#define WL1271_TKIP_IV_SPACE 4
+/* Used for management frames and dummy packets */
+#define WL1271_TID_MGMT 7
+
+struct wl127x_tx_mem {
+ /*
+ * Number of extra memory blocks to allocate for this packet
+ * in addition to the number of blocks derived from the packet
+ * length.
+ */
+ u8 extra_blocks;
+ /*
+ * Total number of memory blocks allocated by the host for
+ * this packet. Must be equal or greater than the actual
+ * blocks number allocated by HW.
+ */
+ u8 total_mem_blocks;
+} __packed;
+
+struct wl128x_tx_mem {
+ /*
+ * Total number of memory blocks allocated by the host for
+ * this packet.
+ */
+ u8 total_mem_blocks;
+ /*
+ * Number of extra bytes at the end of the frame. The host
+ * uses this padding to complete each frame to an integer
+ * number of SDIO blocks.
+ */
+ u8 extra_bytes;
+} __packed;
+
+/*
+ * On wl128x based devices, when TX packets are aggregated, each packet
+ * size must be aligned to the SDIO block size. The maximum block size
+ * is bounded by the type of the padded bytes field that is sent to the
+ * FW. Currently the type is u8, so the maximum block size is 256 bytes.
+ */
+#define WL12XX_BUS_BLOCK_SIZE min(512u, \
+ (1u << (8 * sizeof(((struct wl128x_tx_mem *) 0)->extra_bytes))))
+
struct wl1271_tx_hw_descr {
/* Length of packet in words, including descriptor+header+data */
__le16 length;
- /* Number of extra memory blocks to allocate for this packet in
- addition to the number of blocks derived from the packet length */
- u8 extra_mem_blocks;
- /* Total number of memory blocks allocated by the host for this packet.
- Must be equal or greater than the actual blocks number allocated by
- HW!! */
- u8 total_mem_blocks;
+ union {
+ struct wl127x_tx_mem wl127x_mem;
+ struct wl128x_tx_mem wl128x_mem;
+ } __packed;
/* Device time (in us) when the packet arrived to the driver */
__le32 start_time;
- /* Max delay in TUs until transmission. The last device time the
- packet can be transmitted is: startTime+(1024*LifeTime) */
+ /*
+ * Max delay in TUs until transmission. The last device time the
+ * packet can be transmitted is: start_time + (1024 * life_time)
+ */
__le16 life_time;
/* Bitwise fields - see TX_ATTR... definitions above. */
__le16 tx_attr;
@@ -77,8 +116,12 @@ struct wl1271_tx_hw_descr {
u8 id;
/* The packet TID value (as User-Priority) */
u8 tid;
- /* Identifier of the remote STA in IBSS, 1 in infra-BSS */
- u8 aid;
+ union {
+ /* STA - Identifier of the remote STA in IBSS, 1 in infra-BSS */
+ u8 aid;
+ /* AP - host link ID (HLID) */
+ u8 hlid;
+ } __packed;
u8 reserved;
} __packed;
@@ -146,5 +189,9 @@ void wl1271_tx_reset(struct wl1271 *wl);
void wl1271_tx_flush(struct wl1271 *wl);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
+u32 wl1271_tx_min_rate_get(struct wl1271 *wl);
+u8 wl1271_tx_get_hlid(struct sk_buff *skb);
+void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
+void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
#endif
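
The tx.h block above bounds WL12XX_BUS_BLOCK_SIZE by the width of the extra_bytes field: 1 << (8 * sizeof(u8)) is 256, so min(512u, 256u) yields 256. The standalone arithmetic below illustrates the two alignment rules that wl12xx_calc_packet_alignment() selects between (the example length is chosen arbitrarily):

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int len = 1478;	/* arbitrary frame length in bytes */

	/* wl127x path: pad to the 4-byte TX alignment */
	printf("wl127x: %u -> %u\n", len, ALIGN_UP(len, 4u));		/* 1480 */

	/* wl128x path: pad to the 256-byte SDIO block size; the 58 bytes
	 * of padding fit in the u8 extra_bytes field */
	printf("wl128x: %u -> %u\n", len, ALIGN_UP(len, 256u));	/* 1536 */

	return 0;
}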
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 9050dd9b62d..077d55bf974 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -38,6 +38,13 @@
#define DRIVER_NAME "wl1271"
#define DRIVER_PREFIX DRIVER_NAME ": "
+/*
+ * FW versions that support 802.11n block-ack (BA):
+ * versions marked x.x.x.50-60.x
+ */
+#define WL12XX_BA_SUPPORT_FW_COST_VER2_START 50
+#define WL12XX_BA_SUPPORT_FW_COST_VER2_END 60
+
enum {
DEBUG_NONE = 0,
DEBUG_IRQ = BIT(0),
@@ -57,6 +64,8 @@ enum {
DEBUG_SDIO = BIT(14),
DEBUG_FILTERS = BIT(15),
DEBUG_ADHOC = BIT(16),
+ DEBUG_AP = BIT(17),
+ DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
DEBUG_ALL = ~0,
};
@@ -103,17 +112,34 @@ extern u32 wl12xx_debug_level;
true); \
} while (0)
-#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \
+#define WL1271_DEFAULT_STA_RX_CONFIG (CFG_UNI_FILTER_EN | \
CFG_BSSID_FILTER_EN | \
CFG_MC_FILTER_EN)
-#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
+#define WL1271_DEFAULT_STA_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
-#define WL1271_FW_NAME "wl1271-fw.bin"
-#define WL1271_NVS_NAME "wl1271-nvs.bin"
+#define WL1271_DEFAULT_AP_RX_CONFIG 0
+
+#define WL1271_DEFAULT_AP_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PREQ_EN | \
+ CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
+ CFG_RX_CTL_EN | CFG_RX_AUTH_EN | \
+ CFG_RX_ASSOC_EN)
+
+
+#define WL1271_FW_NAME "ti-connectivity/wl1271-fw-2.bin"
+#define WL128X_FW_NAME "ti-connectivity/wl128x-fw.bin"
+#define WL127X_AP_FW_NAME "ti-connectivity/wl1271-fw-ap.bin"
+#define WL128X_AP_FW_NAME "ti-connectivity/wl128x-fw-ap.bin"
+
+/*
+ * wl127x and wl128x are using the same NVS file name. However, the
+ * ini parameters between them are different. The driver validates
+ * the correct NVS size in wl1271_boot_upload_nvs().
+ */
+#define WL12XX_NVS_NAME "ti-connectivity/wl1271-nvs.bin"
#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
@@ -129,6 +155,24 @@ extern u32 wl12xx_debug_level;
#define WL1271_DEFAULT_BEACON_INT 100
#define WL1271_DEFAULT_DTIM_PERIOD 1
+#define WL1271_AP_GLOBAL_HLID 0
+#define WL1271_AP_BROADCAST_HLID 1
+#define WL1271_AP_STA_HLID_START 2
+
+/*
+ * When in AP-mode, we allow (at least) this number of mem-blocks
+ * to be transmitted to the FW for a STA in PS-mode. The FW wakes the
+ * sleeping STA only when packets are present in its buffers. We want to
+ * queue enough packets for the driver to transmit all of its buffered
+ * data before the STA goes to sleep again, but we don't want to take too
+ * many mem-blocks, as that might hurt the throughput of active STAs.
+ * The number of blocks (18) is enough for 2 large packets.
+ */
+#define WL1271_PS_STA_MAX_BLOCKS (2 * 9)
+
+#define WL1271_AP_BSS_INDEX 0
+#define WL1271_AP_DEF_BEACON_EXP 20
+
#define ACX_TX_DESCRIPTORS 32
#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
@@ -161,10 +205,29 @@ struct wl1271_partition_set {
struct wl1271;
-/* FIXME: I'm not sure about this structure name */
+enum {
+ FW_VER_CHIP,
+ FW_VER_IF_TYPE,
+ FW_VER_MAJOR,
+ FW_VER_SUBTYPE,
+ FW_VER_MINOR,
+
+ NUM_FW_VER
+};
+
+#define FW_VER_CHIP_WL127X 6
+#define FW_VER_CHIP_WL128X 7
+
+#define FW_VER_IF_TYPE_STA 1
+#define FW_VER_IF_TYPE_AP 2
+
+#define FW_VER_MINOR_1_SPARE_STA_MIN 58
+#define FW_VER_MINOR_1_SPARE_AP_MIN 47
+
struct wl1271_chip {
u32 id;
- char fw_ver[21];
+ char fw_ver_str[ETHTOOL_BUSINFO_LEN];
+ unsigned int fw_ver[NUM_FW_VER];
};
struct wl1271_stats {
@@ -178,8 +241,13 @@ struct wl1271_stats {
#define NUM_TX_QUEUES 4
#define NUM_RX_PKT_DESC 8
-/* FW status registers */
-struct wl1271_fw_status {
+#define AP_MAX_STATIONS 5
+
+/* Broadcast and Global links + links to stations */
+#define AP_MAX_LINKS (AP_MAX_STATIONS + 2)
+
+/* FW status registers common for AP/STA */
+struct wl1271_fw_common_status {
__le32 intr;
u8 fw_rx_counter;
u8 drv_rx_counter;
@@ -188,17 +256,54 @@ struct wl1271_fw_status {
__le32 rx_pkt_descs[NUM_RX_PKT_DESC];
__le32 tx_released_blks[NUM_TX_QUEUES];
__le32 fw_localtime;
- __le32 padding[2];
} __packed;
+/* FW status registers for AP */
+struct wl1271_fw_ap_status {
+ struct wl1271_fw_common_status common;
+
+ /* Next fields valid only in AP FW */
+
+ /*
+ * A bitmap (where each bit represents a single HLID)
+ * to indicate if the station is in PS mode.
+ */
+ __le32 link_ps_bitmap;
+
+ /* Number of freed MBs per HLID */
+ u8 tx_lnk_free_blks[AP_MAX_LINKS];
+ u8 padding_1[1];
+} __packed;
+
+/* FW status registers for STA */
+struct wl1271_fw_sta_status {
+ struct wl1271_fw_common_status common;
+
+ u8 tx_total;
+ u8 reserved1;
+ __le16 reserved2;
+ /* Total structure size is 68 bytes */
+ u32 padding;
+} __packed;
+
+struct wl1271_fw_full_status {
+ union {
+ struct wl1271_fw_common_status common;
+ struct wl1271_fw_sta_status sta;
+ struct wl1271_fw_ap_status ap;
+ };
+} __packed;
+
+
struct wl1271_rx_mem_pool_addr {
u32 addr;
u32 addr_extra;
};
+#define WL1271_MAX_CHANNELS 64
struct wl1271_scan {
struct cfg80211_scan_request *req;
- bool *scanned_ch;
+ unsigned long scanned_ch[BITS_TO_LONGS(WL1271_MAX_CHANNELS)];
bool failed;
u8 state;
u8 ssid[IW_ESSID_MAX_SIZE+1];
@@ -216,6 +321,52 @@ struct wl1271_if_operations {
struct device* (*dev)(struct wl1271 *wl);
void (*enable_irq)(struct wl1271 *wl);
void (*disable_irq)(struct wl1271 *wl);
+ void (*set_block_size) (struct wl1271 *wl, unsigned int blksz);
+};
+
+#define MAX_NUM_KEYS 14
+#define MAX_KEY_SIZE 32
+
+struct wl1271_ap_key {
+ u8 id;
+ u8 key_type;
+ u8 key_size;
+ u8 key[MAX_KEY_SIZE];
+ u8 hlid;
+ u32 tx_seq_32;
+ u16 tx_seq_16;
+};
+
+enum wl12xx_flags {
+ WL1271_FLAG_STA_ASSOCIATED,
+ WL1271_FLAG_JOINED,
+ WL1271_FLAG_GPIO_POWER,
+ WL1271_FLAG_TX_QUEUE_STOPPED,
+ WL1271_FLAG_TX_PENDING,
+ WL1271_FLAG_IN_ELP,
+ WL1271_FLAG_ELP_REQUESTED,
+ WL1271_FLAG_PSM,
+ WL1271_FLAG_PSM_REQUESTED,
+ WL1271_FLAG_IRQ_RUNNING,
+ WL1271_FLAG_IDLE,
+ WL1271_FLAG_IDLE_REQUESTED,
+ WL1271_FLAG_PSPOLL_FAILURE,
+ WL1271_FLAG_STA_STATE_SENT,
+ WL1271_FLAG_FW_TX_BUSY,
+ WL1271_FLAG_AP_STARTED,
+ WL1271_FLAG_IF_INITIALIZED,
+ WL1271_FLAG_DUMMY_PACKET_PENDING,
+};
+
+struct wl1271_link {
+ /* AP-mode - TX queue per AC in link */
+ struct sk_buff_head tx_queue[NUM_TX_QUEUES];
+
+ /* accounting for allocated / available TX blocks in FW */
+ u8 allocated_blks;
+ u8 prev_freed_blks;
+
+ u8 addr[ETH_ALEN];
};
struct wl1271 {
@@ -236,21 +387,6 @@ struct wl1271 {
enum wl1271_state state;
struct mutex mutex;
-#define WL1271_FLAG_STA_RATES_CHANGED (0)
-#define WL1271_FLAG_STA_ASSOCIATED (1)
-#define WL1271_FLAG_JOINED (2)
-#define WL1271_FLAG_GPIO_POWER (3)
-#define WL1271_FLAG_TX_QUEUE_STOPPED (4)
-#define WL1271_FLAG_IN_ELP (5)
-#define WL1271_FLAG_PSM (6)
-#define WL1271_FLAG_PSM_REQUESTED (7)
-#define WL1271_FLAG_IRQ_PENDING (8)
-#define WL1271_FLAG_IRQ_RUNNING (9)
-#define WL1271_FLAG_IDLE (10)
-#define WL1271_FLAG_IDLE_REQUESTED (11)
-#define WL1271_FLAG_PSPOLL_FAILURE (12)
-#define WL1271_FLAG_STA_STATE_SENT (13)
-#define WL1271_FLAG_FW_TX_BUSY (14)
unsigned long flags;
struct wl1271_partition_set part;
@@ -262,7 +398,8 @@ struct wl1271 {
u8 *fw;
size_t fw_len;
- struct wl1271_nvs_file *nvs;
+ u8 fw_bss_type;
+ void *nvs;
size_t nvs_len;
s8 hw_pg_ver;
@@ -280,6 +417,7 @@ struct wl1271 {
/* Accounting for allocated / available TX blocks on HW */
u32 tx_blocks_freed[NUM_TX_QUEUES];
u32 tx_blocks_available;
+ u32 tx_allocated_blocks;
u32 tx_results_count;
/* Transmitted TX packets counter for chipset interface */
@@ -295,6 +433,12 @@ struct wl1271 {
struct sk_buff_head tx_queue[NUM_TX_QUEUES];
int tx_queue_count;
+ /* Frames received, not handled yet by mac80211 */
+ struct sk_buff_head deferred_rx_queue;
+
+ /* Frames sent, not returned yet to mac80211 */
+ struct sk_buff_head deferred_tx_queue;
+
struct work_struct tx_work;
/* Pending TX frames */
@@ -315,8 +459,11 @@ struct wl1271 {
/* Intermediate buffer, used for packet aggregation */
u8 *aggr_buf;
- /* The target interrupt mask */
- struct work_struct irq_work;
+ /* Reusable dummy packet template */
+ struct sk_buff *dummy_packet;
+
+ /* Network stack work */
+ struct work_struct netstack_work;
/* Hardware recovery work */
struct work_struct recovery_work;
@@ -343,7 +490,6 @@ struct wl1271 {
* bits 16-23 - 802.11n MCS index mask
* support only 1 stream, thus only 8 bits for the MCS rates (0-7).
*/
- u32 sta_rate_set;
u32 basic_rate_set;
u32 basic_rate;
u32 rate_set;
@@ -378,13 +524,12 @@ struct wl1271 {
int last_rssi_event;
struct wl1271_stats stats;
- struct dentry *rootdir;
__le32 buffer_32;
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
- struct wl1271_fw_status *fw_status;
+ struct wl1271_fw_full_status *fw_status;
struct wl1271_tx_hw_res_if *tx_res_if;
struct ieee80211_vif *vif;
@@ -400,6 +545,46 @@ struct wl1271 {
/* Most recently reported noise in dBm */
s8 noise;
+
+ /* map for HLIDs of associated stations - when operating in AP mode */
+ unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
+
+ /* recorded keys for AP-mode - set here before AP startup */
+ struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
+
+ /* bands supported by this instance of wl12xx */
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+ /* RX BA constraint value */
+ bool ba_support;
+ u8 ba_rx_bitmap;
+
+ int tcxo_clock;
+
+ /*
+ * AP-mode - links indexed by HLID. The global and broadcast links
+ * are always active.
+ */
+ struct wl1271_link links[AP_MAX_LINKS];
+
+ /* the hlid of the link where the last transmitted skb came from */
+ int last_tx_hlid;
+
+ /* AP-mode - a bitmap of links currently in PS mode according to FW */
+ u32 ap_fw_ps_map;
+
+ /* AP-mode - a bitmap of links currently in PS mode in mac80211 */
+ unsigned long ap_ps_map;
+
+ /* Quirks of specific hardware revisions */
+ unsigned int quirks;
+
+ /* Platform limitations */
+ unsigned int platform_quirks;
+};
+
+struct wl1271_station {
+ u8 hlid;
};
int wl1271_plt_start(struct wl1271 *wl);
@@ -414,6 +599,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
#define WL1271_TX_QUEUE_LOW_WATERMARK 10
#define WL1271_TX_QUEUE_HIGH_WATERMARK 25
+#define WL1271_DEFERRED_QUEUE_LIMIT 64
+
/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
on in case it has been shut down shortly before */
#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
@@ -423,4 +610,18 @@ int wl1271_plt_stop(struct wl1271 *wl);
#define HW_BG_RATES_MASK 0xffff
#define HW_HT_RATES_OFFSET 16
+/* Quirks */
+
+/* Each RX/TX transaction requires an end-of-transaction transfer */
+#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)
+
+/*
+ * Older firmwares use 2 spare TX blocks
+ * (for STA < 6.1.3.50.58 or for AP < 6.2.0.0.47)
+ */
+#define WL12XX_QUIRK_USE_2_SPARE_BLOCKS BIT(1)
+
+/* WL128X requires aggregated packets to be aligned to the SDIO block size */
+#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2)
+
#endif
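
The quirk comments above tie WL12XX_QUIRK_USE_2_SPARE_BLOCKS to the parsed firmware version (minor component below 58 for STA firmware, below 47 for AP firmware). A hypothetical helper showing how the fw_ver[] fields and the FW_VER_MINOR_1_SPARE_*_MIN constants could express that rule (the driver's real check lives in its version-parsing code, which is not part of this hunk):

static unsigned int spare_block_quirk(const unsigned int fw_ver[NUM_FW_VER])
{
	unsigned int min_minor;

	if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP)
		min_minor = FW_VER_MINOR_1_SPARE_AP_MIN;
	else
		min_minor = FW_VER_MINOR_1_SPARE_STA_MIN;

	/* older firmware needs two spare TX blocks per packet */
	if (fw_ver[FW_VER_MINOR] < min_minor)
		return WL12XX_QUIRK_USE_2_SPARE_BLOCKS;

	return 0;
}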
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index be21032f4dc..67dcf8f28cd 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -138,13 +138,13 @@ struct wl12xx_arp_rsp_template {
struct ieee80211_hdr_3addr hdr;
u8 llc_hdr[sizeof(rfc1042_header)];
- u16 llc_type;
+ __be16 llc_type;
struct arphdr arp_hdr;
u8 sender_hw[ETH_ALEN];
- u32 sender_ip;
+ __be32 sender_ip;
u8 target_hw[ETH_ALEN];
- u32 target_ip;
+ __be32 target_ip;
} __packed;
@@ -160,4 +160,9 @@ struct wl12xx_probe_resp_template {
struct wl12xx_ie_country country;
} __packed;
+struct wl12xx_disconn_template {
+ struct ieee80211_header header;
+ __le16 disconn_reason;
+} __packed;
+
#endif
diff --git a/drivers/of/address.c b/drivers/of/address.c
index b4559c58c09..b43ff669e04 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -556,6 +556,20 @@ static int __of_address_to_resource(struct device_node *dev,
}
/**
+ * of_address_count - Return the number of entries in the reg property
+ */
+int of_address_count(struct device_node *np)
+{
+ struct resource temp_res;
+ int num_reg = 0;
+
+ while (of_address_to_resource(np, num_reg, &temp_res) == 0)
+ num_reg++;
+ return num_reg;
+}
+EXPORT_SYMBOL_GPL(of_address_count);
+
+/**
* of_address_to_resource - Translate device tree address and return as resource
*
* Note that if your address is a PIO address, the conversion will fail if
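
of_address_count() added above simply walks the reg entries until of_address_to_resource() fails; of_platform_prepare() further down in this patch uses it to size its resource table. A hypothetical caller, assuming the matching declaration is exported via <linux/of_address.h>:

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>

static int report_reg_entries(struct device_node *np)
{
	int num_reg = of_address_count(np);

	pr_info("%s: %d addressable region(s)\n", np->full_name, num_reg);
	return num_reg;
}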
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 710b53bfac6..632ebae7f17 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -496,6 +496,9 @@ EXPORT_SYMBOL(of_find_node_with_property);
const struct of_device_id *of_match_node(const struct of_device_id *matches,
const struct device_node *node)
{
+ if (!matches)
+ return NULL;
+
while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
int match = 1;
if (matches->name[0])
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index c01cd1ac761..fa3587a794e 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/notifier.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -670,33 +671,266 @@ struct platform_device *of_platform_device_create(struct device_node *np,
}
EXPORT_SYMBOL(of_platform_device_create);
+struct of_platform_prepare_data {
+ struct list_head list;
+ struct device_node *node;
+ struct device *dev; /* assigned device */
+
+ int num_resources;
+ struct resource resource[0];
+};
+
+static LIST_HEAD(of_platform_prepare_list);
+static struct notifier_block of_platform_nb;
+
+static struct of_platform_prepare_data *of_platform_find_prepare_data(
+ struct device_node *node)
+{
+ struct of_platform_prepare_data *prep;
+ list_for_each_entry(prep, &of_platform_prepare_list, list)
+ if (prep->node == node)
+ return prep;
+ return NULL;
+}
+
+static bool of_pdev_match_resources(struct platform_device *pdev,
+ struct of_platform_prepare_data *prep)
+{
+ struct resource *node_res = prep->resource;
+ struct resource *pdev_res;
+ int i, j;
+
+ if (prep->num_resources == 0 || pdev->num_resources == 0)
+ return false;
+
+ dev_dbg(&pdev->dev, "compare dt node %s\n", prep->node->full_name);
+
+ /* Compare both resource tables and make sure every node resource
+ * is represented by the platform device. Here we check that each
+ * resource has corresponding entry with the same type and start
+ * values, and the end value falls inside the range specified
+ * in the device tree node. */
+ for (i = 0; i < prep->num_resources; i++, node_res++) {
+ pr_debug(" node res %2i:%.8x..%.8x[%lx]...\n", i,
+ node_res->start, node_res->end, node_res->flags);
+ pdev_res = pdev->resource;
+ for (j = 0; j < pdev->num_resources; j++, pdev_res++) {
+ pr_debug(" pdev res %2i:%.8x..%.8x[%lx]\n", j,
+ pdev_res->start, pdev_res->end, pdev_res->flags);
+ if ((pdev_res->start == node_res->start) &&
+ (pdev_res->end >= node_res->start) &&
+ (pdev_res->end <= node_res->end) &&
+ (pdev_res->flags == node_res->flags)) {
+ pr_debug(" ...MATCH! :-)\n");
+ break;
+ }
+ }
+ if (j >= pdev->num_resources)
+ return false;
+ }
+ return true;
+}
+
+static int of_platform_device_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *_dev)
+{
+ struct platform_device *pdev = to_platform_device(_dev);
+ struct of_platform_prepare_data *prep;
+
+ switch (event) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ if (pdev->dev.of_node)
+ return NOTIFY_DONE;
+
+ list_for_each_entry(prep, &of_platform_prepare_list, list) {
+ if (prep->dev)
+ continue;
+
+ if (!of_pdev_match_resources(pdev, prep))
+ continue;
+
+ /* If disabled, don't let the device bind */
+ if (!of_device_is_available(prep->node)) {
+ char buf[strlen(pdev->name) + 12];
+ dev_info(&pdev->dev, "disabled by dt node %s\n",
+ prep->node->full_name);
+ sprintf(buf, "%s-disabled", pdev->name);
+ pdev->name = kstrdup(buf, GFP_KERNEL);
+ continue;
+ }
+
+ dev_info(&pdev->dev, "attaching dt node %s\n",
+ prep->node->full_name);
+ prep->dev = get_device(&pdev->dev);
+ pdev->dev.of_node = of_node_get(prep->node);
+ return NOTIFY_OK;
+ }
+ break;
+
+ case BUS_NOTIFY_DEL_DEVICE:
+ list_for_each_entry(prep, &of_platform_prepare_list, list) {
+ if (prep->dev == &pdev->dev) {
+ dev_info(&pdev->dev, "detaching dt node %s\n",
+ prep->node->full_name);
+ of_node_put(pdev->dev.of_node);
+ put_device(prep->dev);
+ pdev->dev.of_node = NULL;
+ prep->dev = NULL;
+ return NOTIFY_OK;
+ }
+ }
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
/**
- * of_platform_bus_create - Create an OF device for a bus node and all its
- * children. Optionally recursively instantiate matching busses.
+ * of_platform_prepare - Flag nodes to be used for creating devices
+ * @root: parent of the first level to probe or NULL for the root of the tree
+ * @bus_match: match table for child bus nodes, or NULL
+ *
+ * This function sets up 'snooping' of device tree registrations and
+ * when a device registration is found that matches a node in the
+ * device tree, it populates the platform_device with a pointer to the
+ * matching node.
+ *
+ * A bus notifier is used to implement this behaviour. When this
+ * function is called, it will parse all the child nodes of @root and
+ * create a lookup table of eligible device nodes. A device node is
+ * considered eligible if it:
+ * a) has a compatible property,
+ * b) has memory mapped registers, and
+ * c) has a mappable interrupt.
+ *
+ * It will also recursively parse child buses providing
+ * a) the child bus node has a ranges property (children have
+ * memory-mapped registers), and
+ * b) it is compatible with the @matches list.
+ *
+ * The lookup table will be used as data for a platform bus notifier
+ * that will compare each new device registration with the table
+ * before a device driver is bound to it. If there is a match, then
+ * the of_node pointer will be added to the device. Therefore it is
+ * important to call this function *before* any platform devices get
+ * registered.
+ */
+void of_platform_prepare(struct device_node *root,
+ const struct of_device_id *matches)
+{
+ struct device_node *child;
+ struct of_platform_prepare_data *prep;
+
+ /* register the notifier if it isn't already */
+ if (!of_platform_nb.notifier_call) {
+ of_platform_nb.notifier_call = of_platform_device_notifier_call;
+ bus_register_notifier(&platform_bus_type, &of_platform_nb);
+ }
+
+ /* If root is null, then start at the root of the tree */
+ root = root ? of_node_get(root) : of_find_node_by_path("/");
+ if (!root)
+ return;
+
+ pr_debug("of_platform_prepare()\n");
+ pr_debug(" starting at: %s\n", root->full_name);
+
+ /* Loop over children and record the details */
+ for_each_child_of_node(root, child) {
+ struct resource *res;
+ int num_irq, num_reg, i;
+
+ /* If this is a bus node, recursively inspect the children,
+ * but *don't* prepare it. Prepare only concerns
+ * itself with leaf-nodes. */
+ if (of_match_node(matches, child)) {
+ of_platform_prepare(child, matches);
+ continue;
+ }
+
+ /* Is it already in the list? */
+ if (of_platform_find_prepare_data(child))
+ continue;
+
+ /* Make sure it has a compatible property */
+ if (!of_get_property(child, "compatible", NULL))
+ continue;
+
+ /*
+ * Count the resources. If the device doesn't have any
+ * register ranges, then it gets skipped because there is no
+ * way to match such a device against static registration
+ */
+ num_irq = of_irq_count(child);
+ num_reg = of_address_count(child);
+ if (!num_reg)
+ continue;
+
+ /* Device node looks valid; record the details */
+ prep = kzalloc(sizeof(*prep) +
+ (sizeof(prep->resource[0]) * (num_irq + num_reg)),
+ GFP_KERNEL);
+ if (!prep)
+ return; /* We're screwed if malloc doesn't work. */
+
+ INIT_LIST_HEAD(&prep->list);
+
+ res = &prep->resource[0];
+ for (i = 0; i < num_reg; i++, res++)
+ WARN_ON(of_address_to_resource(child, i, res));
+ WARN_ON(of_irq_to_resource_table(child, res, num_irq) != num_irq);
+ prep->num_resources = num_reg + num_irq;
+ prep->node = of_node_get(child);
+
+ list_add_tail(&prep->list, &of_platform_prepare_list);
+
+ pr_debug("%s() - %s prepared (%i regs, %i irqs)\n",
+ __func__, prep->node->full_name, num_reg, num_irq);
+ }
+
+}
+
+/**
+ * of_platform_bus_create() - Create a device for a node and its children.
* @bus: device node of the bus to instantiate
- * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to
- * disallow recursive creation of child busses
+ * @matches: match table for bus nodes, or NULL to
+ * disallow recursive creation of child buses
+ * @parent: parent for new device, or NULL for top level.
+ *
+ * Creates a platform_device for the provided device_node, and optionally
+ * recursively create devices for all the child nodes.
*/
-static int of_platform_bus_create(const struct device_node *bus,
+static int of_platform_bus_create(struct device_node *bus,
const struct of_device_id *matches,
- struct device *parent)
+ struct device *parent, bool strict)
{
+ struct of_platform_prepare_data *prep;
struct device_node *child;
struct platform_device *dev;
int rc = 0;
+ /* Make sure it has a compatible property */
+ if (strict && (!of_get_property(bus, "compatible", NULL))) {
+ pr_debug("%s() - skipping %s, no compatible prop\n",
+ __func__, bus->full_name);
+ return 0;
+ }
+
+ /* Has the device already been registered manually? */
+ prep = of_platform_find_prepare_data(bus);
+ if (prep && prep->dev) {
+ pr_debug("%s() - skipping %s, already registered\n",
+ __func__, bus->full_name);
+ return 0;
+ }
+
+ dev = of_platform_device_create(bus, NULL, parent);
+ if (!dev || !of_match_node(matches, bus))
+ return 0;
+
for_each_child_of_node(bus, child) {
pr_debug(" create child: %s\n", child->full_name);
- dev = of_platform_device_create(child, NULL, parent);
- if (dev == NULL)
- continue;
-
- if (!of_match_node(matches, child))
- continue;
- if (rc == 0) {
- pr_debug(" and sub busses\n");
- rc = of_platform_bus_create(child, matches, &dev->dev);
- }
+ rc = of_platform_bus_create(child, matches, &dev->dev, strict);
if (rc) {
of_node_put(child);
break;
@@ -706,9 +940,9 @@ static int of_platform_bus_create(const struct device_node *bus,
}
/**
- * of_platform_bus_probe - Probe the device-tree for platform busses
+ * of_platform_bus_probe() - Probe the device-tree for platform buses
* @root: parent of the first level to probe or NULL for the root of the tree
- * @matches: match table, NULL to use the default
+ * @matches: match table for bus nodes
* @parent: parent to hook devices from, NULL for toplevel
*
* Note that children of the provided root are not instantiated as devices
@@ -719,52 +953,67 @@ int of_platform_bus_probe(struct device_node *root,
struct device *parent)
{
struct device_node *child;
- struct platform_device *dev;
int rc = 0;
- if (WARN_ON(!matches || matches == OF_NO_DEEP_PROBE))
- return -EINVAL;
- if (root == NULL)
- root = of_find_node_by_path("/");
- else
- of_node_get(root);
- if (root == NULL)
+ root = root ? of_node_get(root) : of_find_node_by_path("/");
+ if (!root)
return -EINVAL;
pr_debug("of_platform_bus_probe()\n");
pr_debug(" starting at: %s\n", root->full_name);
- /* Do a self check of bus type, if there's a match, create
- * children
- */
+ /* Do a self check of bus type, if there's a match, create children */
if (of_match_node(matches, root)) {
- pr_debug(" root match, create all sub devices\n");
- dev = of_platform_device_create(root, NULL, parent);
- if (dev == NULL)
- goto bail;
-
- pr_debug(" create all sub busses\n");
- rc = of_platform_bus_create(root, matches, &dev->dev);
- goto bail;
- }
- for_each_child_of_node(root, child) {
+ rc = of_platform_bus_create(root, matches, parent, false);
+ } else for_each_child_of_node(root, child) {
if (!of_match_node(matches, child))
continue;
+ rc = of_platform_bus_create(child, matches, parent, false);
+ if (rc)
+ break;
+ }
- pr_debug(" match: %s\n", child->full_name);
- dev = of_platform_device_create(child, NULL, parent);
- if (dev == NULL)
- continue;
+ of_node_put(root);
+ return rc;
+}
+EXPORT_SYMBOL(of_platform_bus_probe);
- rc = of_platform_bus_create(child, matches, &dev->dev);
- if (rc) {
- of_node_put(child);
+/**
+ * of_platform_populate() - Populate platform_devices from device tree data
+ * @root: parent of the first level to probe or NULL for the root of the tree
+ * @matches: match table, NULL to use the default
+ * @parent: parent to hook devices from, NULL for toplevel
+ *
+ * Similar to of_platform_bus_probe(), this function walks the device tree
+ * and creates devices from nodes. It differs in that it follows the modern
+ * convention of requiring all device nodes to have a 'compatible' property,
+ * and it is suitable for creating devices which are children of the root
+ * node (of_platform_bus_probe will only create children of the root which
+ * are selected by the @matches argument).
+ *
+ * New board support should be using this function instead of
+ * of_platform_bus_probe().
+ *
+ * Returns 0 on success, < 0 on failure.
+ */
+int of_platform_populate(struct device_node *root,
+ const struct of_device_id *matches,
+ struct device *parent)
+{
+ struct device_node *child;
+ int rc = 0;
+
+ root = root ? of_node_get(root) : of_find_node_by_path("/");
+ if (!root)
+ return -EINVAL;
+
+ for_each_child_of_node(root, child) {
+ rc = of_platform_bus_create(child, matches, parent, true);
+ if (rc)
break;
- }
}
- bail:
+
of_node_put(root);
return rc;
}
-EXPORT_SYMBOL(of_platform_bus_probe);
#endif /* !CONFIG_SPARC */
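
For board code, the new entry point is usually called once from the machine's init hook. A minimal sketch, not part of this patch; the match-table contents and function names are illustrative only:

	static const struct of_device_id board_of_bus_ids[] = {
		{ .compatible = "simple-bus" },
		{} /* sentinel */
	};

	static void __init board_dt_init(void)
	{
		/* Create platform devices for every child of the root node,
		 * descending into the bus bindings listed above. */
		of_platform_populate(NULL, board_of_bus_ids, NULL);
	}
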
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index 02414db6a94..763f894ed18 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -39,7 +39,7 @@ struct jz_battery {
int irq;
int charge_irq;
- struct mfd_cell *cell;
+ const struct mfd_cell *cell;
int status;
long voltage;
@@ -258,7 +258,7 @@ static int __devinit jz_battery_probe(struct platform_device *pdev)
return -ENOMEM;
}
- jz_battery->cell = pdev->dev.platform_data;
+ jz_battery->cell = mfd_get_cell(pdev);
jz_battery->irq = platform_get_irq(pdev, 0);
if (jz_battery->irq < 0) {
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index dd6308499bd..859251250b5 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/88pm860x.h>
struct pm8607_regulator_info {
@@ -394,47 +395,48 @@ static struct pm8607_regulator_info pm8607_regulator_info[] = {
PM8607_LDO(14, LDO14, 0, 4, SUPPLIES_EN12, 6),
};
-static inline struct pm8607_regulator_info *find_regulator_info(int id)
+static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
{
- struct pm8607_regulator_info *info;
+ struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct pm8607_regulator_info *info = NULL;
+ struct regulator_init_data *pdata;
+ struct mfd_cell *cell;
int i;
+ cell = pdev->dev.platform_data;
+ if (cell == NULL)
+ return -ENODEV;
+ pdata = cell->mfd_data;
+ if (pdata == NULL)
+ return -EINVAL;
+
for (i = 0; i < ARRAY_SIZE(pm8607_regulator_info); i++) {
info = &pm8607_regulator_info[i];
- if (info->desc.id == id)
- return info;
+ if (!strcmp(info->desc.name, pdata->constraints.name))
+ break;
}
- return NULL;
-}
-
-static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
-{
- struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm860x_platform_data *pdata = chip->dev->platform_data;
- struct pm8607_regulator_info *info = NULL;
-
- info = find_regulator_info(pdev->id);
- if (info == NULL) {
- dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ if (i >= ARRAY_SIZE(pm8607_regulator_info)) {
+ dev_err(&pdev->dev, "Failed to find regulator %s\n",
+ pdata->constraints.name);
return -EINVAL;
}
info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
info->chip = chip;
+ /* check DVC ramp slope double */
+ if (!strcmp(info->desc.name, "BUCK3"))
+ if (info->chip->buck3_double)
+ info->slope_double = 1;
+
info->regulator = regulator_register(&info->desc, &pdev->dev,
- pdata->regulator[pdev->id], info);
+ pdata, info);
if (IS_ERR(info->regulator)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
return PTR_ERR(info->regulator);
}
- /* check DVC ramp slope double */
- if (info->desc.id == PM8607_ID_BUCK3)
- if (info->chip->buck3_double)
- info->slope_double = 1;
-
platform_set_drvdata(pdev, info);
return 0;
}
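
The probe now expects the parent 88pm860x MFD driver to hand each regulator its init data through the cell's mfd_data pointer and to identify it by constraints.name rather than by platform device id. A hedged sketch of that wiring; the cell name and constraint values are assumptions, not taken from this patch:

	static struct regulator_init_data board_buck3_data = {
		.constraints = {
			.name		= "BUCK3",	/* must match desc.name above */
			.min_uV		= 1000000,
			.max_uV		= 1500000,
			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
		},
	};

	static struct mfd_cell board_regulator_cell = {
		.name		= "88pm860x-regulator",	/* illustrative */
		.mfd_data	= &board_buck3_data,
	};
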
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index e1d943619ab..b9f29e0d429 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -108,6 +108,15 @@ config REGULATOR_MAX8952
via I2C bus. Maxim 8952 has one voltage output and supports 4 DVS
modes ranging from 0.77V to 1.40V by 0.01V steps.
+config REGULATOR_MAX8997
+ tristate "Maxim 8997/8966 regulator"
+ depends on MFD_MAX8997
+ help
+ This driver controls a Maxim 8997/8966 regulator
+ via I2C bus. The provided regulator is suitable for S5PC110,
+ S5PV210, and Exynos-4 chips to control VCC_CORE and
+ VCC_USIM voltages.
+
config REGULATOR_MAX8998
tristate "Maxim 8998 voltage regulator"
depends on MFD_MAX8998
@@ -117,7 +126,7 @@ config REGULATOR_MAX8998
and S5PC1XX chips to control VCC_CORE and VCC_USIM voltages.
config REGULATOR_TWL4030
- bool "TI TWL4030/TWL5030/TWL6030/TPS695x0 PMIC"
+ bool "TI TWL4030/TWL5030/TWL6030/TPS659x0 PMIC"
depends on TWL4030_CORE
help
This driver supports the voltage regulators provided by
@@ -214,6 +223,15 @@ config REGULATOR_AB3100
AB3100 analog baseband dealing with power regulators
for the system.
+config REGULATOR_TPS6105X
+ tristate "TI TPS6105X Power regulators"
+ depends on TPS6105X
+ default y if TPS6105X
+ help
+ This driver supports the TPS61050/TPS61052 voltage regulator chips.
+ The chip is a single boost converter used primarily for white LEDs
+ and audio amplifiers.
+
config REGULATOR_TPS65023
tristate "TI TPS65023 Power regulators"
depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 0b5e88c2b8d..d72a4275677 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o
obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o
+obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o
obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
@@ -33,7 +34,7 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
-
+obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index ed6feaf9398..b1d77946e9c 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/mfd/abx500.h>
+#include <linux/mfd/core.h>
/* LDO registers and some handy masking definitions for AB3100 */
#define AB3100_LDO_A 0x40
@@ -205,29 +206,6 @@ static int ab3100_enable_regulator(struct regulator_dev *reg)
return err;
}
- /* Per-regulator power on delay from spec */
- switch (abreg->regreg) {
- case AB3100_LDO_A: /* Fallthrough */
- case AB3100_LDO_C: /* Fallthrough */
- case AB3100_LDO_D: /* Fallthrough */
- case AB3100_LDO_E: /* Fallthrough */
- case AB3100_LDO_H: /* Fallthrough */
- case AB3100_LDO_K:
- udelay(200);
- break;
- case AB3100_LDO_F:
- udelay(600);
- break;
- case AB3100_LDO_G:
- udelay(400);
- break;
- case AB3100_BUCK:
- mdelay(1);
- break;
- default:
- break;
- }
-
return 0;
}
@@ -449,11 +427,37 @@ static int ab3100_get_voltage_regulator_external(struct regulator_dev *reg)
return abreg->plfdata->external_voltage;
}
+static int ab3100_enable_time_regulator(struct regulator_dev *reg)
+{
+ struct ab3100_regulator *abreg = reg->reg_data;
+
+ /* Per-regulator power on delay from spec */
+ switch (abreg->regreg) {
+ case AB3100_LDO_A: /* Fallthrough */
+ case AB3100_LDO_C: /* Fallthrough */
+ case AB3100_LDO_D: /* Fallthrough */
+ case AB3100_LDO_E: /* Fallthrough */
+ case AB3100_LDO_H: /* Fallthrough */
+ case AB3100_LDO_K:
+ return 200;
+ case AB3100_LDO_F:
+ return 600;
+ case AB3100_LDO_G:
+ return 400;
+ case AB3100_BUCK:
+ return 1000;
+ default:
+ break;
+ }
+ return 0;
+}
+
static struct regulator_ops regulator_ops_fixed = {
.enable = ab3100_enable_regulator,
.disable = ab3100_disable_regulator,
.is_enabled = ab3100_is_enabled_regulator,
.get_voltage = ab3100_get_voltage_regulator,
+ .enable_time = ab3100_enable_time_regulator,
};
static struct regulator_ops regulator_ops_variable = {
@@ -463,6 +467,7 @@ static struct regulator_ops regulator_ops_variable = {
.get_voltage = ab3100_get_voltage_regulator,
.set_voltage = ab3100_set_voltage_regulator,
.list_voltage = ab3100_list_voltage_regulator,
+ .enable_time = ab3100_enable_time_regulator,
};
static struct regulator_ops regulator_ops_variable_sleepable = {
@@ -473,6 +478,7 @@ static struct regulator_ops regulator_ops_variable_sleepable = {
.set_voltage = ab3100_set_voltage_regulator,
.set_suspend_voltage = ab3100_set_suspend_voltage_regulator,
.list_voltage = ab3100_list_voltage_regulator,
+ .enable_time = ab3100_enable_time_regulator,
};
/*
@@ -576,7 +582,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
{
- struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
+ struct ab3100_platform_data *plfdata = mfd_get_data(pdev);
int err = 0;
u8 data;
int i;
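
With the open-coded delays removed, the driver relies on the core honouring the new .enable_time() callback. A rough sketch of how the returned value is expected to be consumed, mirroring the millisecond/microsecond split this series adds to _regulator_do_set_voltage(); the real core code may differ in detail:

	static int example_enable_and_settle(struct regulator_dev *rdev)
	{
		struct regulator_ops *ops = rdev->desc->ops;
		int ret, delay = 0;

		if (ops->enable_time)
			delay = ops->enable_time(rdev);	/* microseconds */

		ret = ops->enable(rdev);
		if (ret)
			return ret;

		/* Busy-wait for the regulator output to settle */
		if (delay >= 1000) {
			mdelay(delay / 1000);
			udelay(delay % 1000);
		} else if (delay) {
			udelay(delay);
		}
		return 0;
	}
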
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index d9a052c53ae..02f3c2333c8 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -9,7 +9,7 @@
* AB8500 peripheral regulators
*
* AB8500 supports the following regulators:
- * VAUX1/2/3, VINTCORE, VTVOUT, VAUDIO, VAMIC1/2, VDMIC, VANA
+ * VAUX1/2/3, VINTCORE, VTVOUT, VUSB, VAUDIO, VAMIC1/2, VDMIC, VANA
*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -38,6 +38,7 @@
* @voltage_mask: mask to control regulator voltage
* @voltages: supported voltage table
* @voltages_len: number of supported voltages for the regulator
+ * @delay: startup/set voltage delay in us
*/
struct ab8500_regulator_info {
struct device *dev;
@@ -55,6 +56,7 @@ struct ab8500_regulator_info {
u8 voltage_mask;
int const *voltages;
int voltages_len;
+ unsigned int delay;
};
/* voltage tables for the vauxn/vintcore supplies */
@@ -290,6 +292,29 @@ static int ab8500_regulator_set_voltage(struct regulator_dev *rdev,
return ret;
}
+static int ab8500_regulator_enable_time(struct regulator_dev *rdev)
+{
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ return info->delay;
+}
+
+static int ab8500_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_sel,
+ unsigned int new_sel)
+{
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ /* If the regulator isn't on, it won't take time here */
+ ret = ab8500_regulator_is_enabled(rdev);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return 0;
+ return info->delay;
+}
+
static struct regulator_ops ab8500_regulator_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
@@ -297,6 +322,8 @@ static struct regulator_ops ab8500_regulator_ops = {
.get_voltage = ab8500_regulator_get_voltage,
.set_voltage = ab8500_regulator_set_voltage,
.list_voltage = ab8500_list_voltage,
+ .enable_time = ab8500_regulator_enable_time,
+ .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel,
};
static int ab8500_fixed_get_voltage(struct regulator_dev *rdev)
@@ -317,6 +344,8 @@ static struct regulator_ops ab8500_regulator_fixed_ops = {
.is_enabled = ab8500_regulator_is_enabled,
.get_voltage = ab8500_fixed_get_voltage,
.list_voltage = ab8500_list_voltage,
+ .enable_time = ab8500_regulator_enable_time,
+ .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel,
};
static struct ab8500_regulator_info
@@ -426,12 +455,28 @@ static struct ab8500_regulator_info
.owner = THIS_MODULE,
.n_voltages = 1,
},
+ .delay = 10000,
.fixed_uV = 2000000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x82,
.update_val_enable = 0x02,
},
+ [AB8500_LDO_USB] = {
+ .desc = {
+ .name = "LDO-USB",
+ .ops = &ab8500_regulator_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_USB,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 3300000,
+ .update_bank = 0x03,
+ .update_reg = 0x82,
+ .update_mask = 0x03,
+ .update_val_enable = 0x01,
+ },
[AB8500_LDO_AUDIO] = {
.desc = {
.name = "LDO-AUDIO",
@@ -511,6 +556,186 @@ static struct ab8500_regulator_info
};
+struct ab8500_reg_init {
+ u8 bank;
+ u8 addr;
+ u8 mask;
+};
+
+#define REG_INIT(_id, _bank, _addr, _mask) \
+ [_id] = { \
+ .bank = _bank, \
+ .addr = _addr, \
+ .mask = _mask, \
+ }
+
+static struct ab8500_reg_init ab8500_reg_init[] = {
+ /*
+ * 0x30, VanaRequestCtrl
+ * 0x0C, VpllRequestCtrl
+ * 0xc0, VextSupply1RequestCtrl
+ */
+ REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xfc),
+ /*
+ * 0x03, VextSupply2RequestCtrl
+ * 0x0c, VextSupply3RequestCtrl
+ * 0x30, Vaux1RequestCtrl
+ * 0xc0, Vaux2RequestCtrl
+ */
+ REG_INIT(AB8500_REGUREQUESTCTRL3, 0x03, 0x05, 0xff),
+ /*
+ * 0x03, Vaux3RequestCtrl
+ * 0x04, SwHPReq
+ */
+ REG_INIT(AB8500_REGUREQUESTCTRL4, 0x03, 0x06, 0x07),
+ /*
+ * 0x08, VanaSysClkReq1HPValid
+ * 0x20, Vaux1SysClkReq1HPValid
+ * 0x40, Vaux2SysClkReq1HPValid
+ * 0x80, Vaux3SysClkReq1HPValid
+ */
+ REG_INIT(AB8500_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xe8),
+ /*
+ * 0x10, VextSupply1SysClkReq1HPValid
+ * 0x20, VextSupply2SysClkReq1HPValid
+ * 0x40, VextSupply3SysClkReq1HPValid
+ */
+ REG_INIT(AB8500_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x70),
+ /*
+ * 0x08, VanaHwHPReq1Valid
+ * 0x20, Vaux1HwHPReq1Valid
+ * 0x40, Vaux2HwHPReq1Valid
+ * 0x80, Vaux3HwHPReq1Valid
+ */
+ REG_INIT(AB8500_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xe8),
+ /*
+ * 0x01, VextSupply1HwHPReq1Valid
+ * 0x02, VextSupply2HwHPReq1Valid
+ * 0x04, VextSupply3HwHPReq1Valid
+ */
+ REG_INIT(AB8500_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x07),
+ /*
+ * 0x08, VanaHwHPReq2Valid
+ * 0x20, Vaux1HwHPReq2Valid
+ * 0x40, Vaux2HwHPReq2Valid
+ * 0x80, Vaux3HwHPReq2Valid
+ */
+ REG_INIT(AB8500_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xe8),
+ /*
+ * 0x01, VextSupply1HwHPReq2Valid
+ * 0x02, VextSupply2HwHPReq2Valid
+ * 0x04, VextSupply3HwHPReq2Valid
+ */
+ REG_INIT(AB8500_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x07),
+ /*
+ * 0x20, VanaSwHPReqValid
+ * 0x80, Vaux1SwHPReqValid
+ */
+ REG_INIT(AB8500_REGUSWHPREQVALID1, 0x03, 0x0d, 0xa0),
+ /*
+ * 0x01, Vaux2SwHPReqValid
+ * 0x02, Vaux3SwHPReqValid
+ * 0x04, VextSupply1SwHPReqValid
+ * 0x08, VextSupply2SwHPReqValid
+ * 0x10, VextSupply3SwHPReqValid
+ */
+ REG_INIT(AB8500_REGUSWHPREQVALID2, 0x03, 0x0e, 0x1f),
+ /*
+ * 0x02, SysClkReq2Valid1
+ * ...
+ * 0x80, SysClkReq8Valid1
+ */
+ REG_INIT(AB8500_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xfe),
+ /*
+ * 0x02, SysClkReq2Valid2
+ * ...
+ * 0x80, SysClkReq8Valid2
+ */
+ REG_INIT(AB8500_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xfe),
+ /*
+ * 0x02, VTVoutEna
+ * 0x04, Vintcore12Ena
+ * 0x38, Vintcore12Sel
+ * 0x40, Vintcore12LP
+ * 0x80, VTVoutLP
+ */
+ REG_INIT(AB8500_REGUMISC1, 0x03, 0x80, 0xfe),
+ /*
+ * 0x02, VaudioEna
+ * 0x04, VdmicEna
+ * 0x08, Vamic1Ena
+ * 0x10, Vamic2Ena
+ */
+ REG_INIT(AB8500_VAUDIOSUPPLY, 0x03, 0x83, 0x1e),
+ /*
+ * 0x01, Vamic1_dzout
+ * 0x02, Vamic2_dzout
+ */
+ REG_INIT(AB8500_REGUCTRL1VAMIC, 0x03, 0x84, 0x03),
+ /*
+ * 0x0c, VanaRegu
+ * 0x03, VpllRegu
+ */
+ REG_INIT(AB8500_VPLLVANAREGU, 0x04, 0x06, 0x0f),
+ /*
+ * 0x01, VrefDDREna
+ * 0x02, VrefDDRSleepMode
+ */
+ REG_INIT(AB8500_VREFDDR, 0x04, 0x07, 0x03),
+ /*
+ * 0x03, VextSupply1Regu
+ * 0x0c, VextSupply2Regu
+ * 0x30, VextSupply3Regu
+ * 0x40, ExtSupply2Bypass
+ * 0x80, ExtSupply3Bypass
+ */
+ REG_INIT(AB8500_EXTSUPPLYREGU, 0x04, 0x08, 0xff),
+ /*
+ * 0x03, Vaux1Regu
+ * 0x0c, Vaux2Regu
+ */
+ REG_INIT(AB8500_VAUX12REGU, 0x04, 0x09, 0x0f),
+ /*
+ * 0x03, Vaux3Regu
+ */
+ REG_INIT(AB8500_VRF1VAUX3REGU, 0x04, 0x0a, 0x03),
+ /*
+ * 0x3f, Vsmps1Sel1
+ */
+ REG_INIT(AB8500_VSMPS1SEL1, 0x04, 0x13, 0x3f),
+ /*
+ * 0x0f, Vaux1Sel
+ */
+ REG_INIT(AB8500_VAUX1SEL, 0x04, 0x1f, 0x0f),
+ /*
+ * 0x0f, Vaux2Sel
+ */
+ REG_INIT(AB8500_VAUX2SEL, 0x04, 0x20, 0x0f),
+ /*
+ * 0x07, Vaux3Sel
+ */
+ REG_INIT(AB8500_VRF1VAUX3SEL, 0x04, 0x21, 0x07),
+ /*
+ * 0x01, VextSupply12LP
+ */
+ REG_INIT(AB8500_REGUCTRL2SPARE, 0x04, 0x22, 0x01),
+ /*
+ * 0x04, Vaux1Disch
+ * 0x08, Vaux2Disch
+ * 0x10, Vaux3Disch
+ * 0x20, Vintcore12Disch
+ * 0x40, VTVoutDisch
+ * 0x80, VaudioDisch
+ */
+ REG_INIT(AB8500_REGUCTRLDISCH, 0x04, 0x43, 0xfc),
+ /*
+ * 0x02, VanaDisch
+ * 0x04, VdmicPullDownEna
+ * 0x10, VdmicDisch
+ */
+ REG_INIT(AB8500_REGUCTRLDISCH2, 0x04, 0x44, 0x16),
+};
+
static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
@@ -529,10 +754,51 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
/* make sure the platform data has the correct size */
if (pdata->num_regulator != ARRAY_SIZE(ab8500_regulator_info)) {
- dev_err(&pdev->dev, "platform configuration error\n");
+ dev_err(&pdev->dev, "Configuration error: size mismatch.\n");
return -EINVAL;
}
+ /* initialize registers */
+ for (i = 0; i < pdata->num_regulator_reg_init; i++) {
+ int id;
+ u8 value;
+
+ id = pdata->regulator_reg_init[i].id;
+ value = pdata->regulator_reg_init[i].value;
+
+ /* check for configuration errors */
+ if (id >= AB8500_NUM_REGULATOR_REGISTERS) {
+ dev_err(&pdev->dev,
+ "Configuration error: id outside range.\n");
+ return -EINVAL;
+ }
+ if (value & ~ab8500_reg_init[id].mask) {
+ dev_err(&pdev->dev,
+ "Configuration error: value outside mask.\n");
+ return -EINVAL;
+ }
+
+ /* initialize register */
+ err = abx500_mask_and_set_register_interruptible(&pdev->dev,
+ ab8500_reg_init[id].bank,
+ ab8500_reg_init[id].addr,
+ ab8500_reg_init[id].mask,
+ value);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Failed to initialize 0x%02x, 0x%02x.\n",
+ ab8500_reg_init[id].bank,
+ ab8500_reg_init[id].addr);
+ return err;
+ }
+ dev_vdbg(&pdev->dev,
+ " init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ ab8500_reg_init[id].bank,
+ ab8500_reg_init[id].addr,
+ ab8500_reg_init[id].mask,
+ value);
+ }
+
/* register all regulators */
for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
struct ab8500_regulator_info *info = NULL;
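
A hedged sketch of the board data consumed by the new register-initialization loop above. Only the .id and .value fields are taken from the probe code; the struct and array names are assumptions:

	static struct ab8500_regulator_reg_init board_ab8500_reg_init[] = {
		/* VanaRequestCtrl/VpllRequestCtrl bits in AB8500_REGUREQUESTCTRL2 */
		{ .id = AB8500_REGUREQUESTCTRL2, .value = 0x00 },
	};

	/* ...referenced from the ab8500 platform data as:
	 *	.regulator_reg_init	= board_ab8500_reg_init,
	 *	.num_regulator_reg_init	= ARRAY_SIZE(board_ab8500_reg_init),
	 */
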
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 9fa20957847..3ffc6979d16 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1629,6 +1629,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
int ret;
+ int delay = 0;
unsigned int selector;
trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
@@ -1662,6 +1663,22 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
}
}
+ /*
+ * If we can't obtain the old selector there is not enough
+ * info to call set_voltage_time_sel().
+ */
+ if (rdev->desc->ops->set_voltage_time_sel &&
+ rdev->desc->ops->get_voltage_sel) {
+ unsigned int old_selector = 0;
+
+ ret = rdev->desc->ops->get_voltage_sel(rdev);
+ if (ret < 0)
+ return ret;
+ old_selector = ret;
+ delay = rdev->desc->ops->set_voltage_time_sel(rdev,
+ old_selector, selector);
+ }
+
if (best_val != INT_MAX) {
ret = rdev->desc->ops->set_voltage_sel(rdev, selector);
selector = best_val;
@@ -1672,6 +1689,14 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
ret = -EINVAL;
}
+ /* Insert any necessary delays */
+ if (delay >= 1000) {
+ mdelay(delay / 1000);
+ udelay(delay % 1000);
+ } else if (delay) {
+ udelay(delay);
+ }
+
if (ret == 0)
_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
NULL);
@@ -1740,6 +1765,51 @@ out:
EXPORT_SYMBOL_GPL(regulator_set_voltage);
/**
+ * regulator_set_voltage_time - get rise/fall time
+ * @regulator: regulator source
+ * @old_uV: starting voltage in microvolts
+ * @new_uV: target voltage in microvolts
+ *
+ * Provided with the starting and ending voltage, this function attempts to
+ * calculate the time in microseconds required to rise or fall to this new
+ * voltage.
+ */
+int regulator_set_voltage_time(struct regulator *regulator,
+ int old_uV, int new_uV)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_ops *ops = rdev->desc->ops;
+ int old_sel = -1;
+ int new_sel = -1;
+ int voltage;
+ int i;
+
+ /* Currently requires operations to do this */
+ if (!ops->list_voltage || !ops->set_voltage_time_sel
+ || !rdev->desc->n_voltages)
+ return -EINVAL;
+
+ for (i = 0; i < rdev->desc->n_voltages; i++) {
+ /* We only look for exact voltage matches here */
+ voltage = regulator_list_voltage(regulator, i);
+ if (voltage < 0)
+ return -EINVAL;
+ if (voltage == 0)
+ continue;
+ if (voltage == old_uV)
+ old_sel = i;
+ if (voltage == new_uV)
+ new_sel = i;
+ }
+
+ if (old_sel < 0 || new_sel < 0)
+ return -EINVAL;
+
+ return ops->set_voltage_time_sel(rdev, old_sel, new_sel);
+}
+EXPORT_SYMBOL_GPL(regulator_set_voltage_time);
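
A consumer-side sketch, not part of the patch: the value is mainly useful for latency budgeting, for example when a DVFS driver wants to know how long a step on a hypothetical "vdd_core" supply will take. regulator_set_voltage() itself already inserts the delay when the driver implements set_voltage_time_sel():

	static int example_step_latency_us(struct regulator *vdd_core,
					   int old_uV, int new_uV)
	{
		int settle = regulator_set_voltage_time(vdd_core, old_uV, new_uV);

		/* A negative return means the driver has no set_voltage_time_sel()
		 * or the requested voltages are not exact list entries. */
		return settle < 0 ? 0 : settle;
	}
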
+
+/**
* regulator_sync_voltage - re-apply last regulator output voltage
* @regulator: regulator source
*
@@ -2565,8 +2635,11 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
init_data->consumer_supplies[i].dev,
init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(dev, "Failed to set supply %s\n",
+ init_data->consumer_supplies[i].supply);
goto unset_supplies;
+ }
}
list_add(&rdev->list, &regulator_list);
@@ -2653,6 +2726,47 @@ out:
EXPORT_SYMBOL_GPL(regulator_suspend_prepare);
/**
+ * regulator_suspend_finish - resume regulators from system wide suspend
+ *
+ * Turn on regulators that might have been turned off by
+ * regulator_suspend_prepare() and that should be turned on according to the
+ * regulators' properties.
+ */
+int regulator_suspend_finish(void)
+{
+ struct regulator_dev *rdev;
+ int ret = 0, error;
+
+ mutex_lock(&regulator_list_mutex);
+ list_for_each_entry(rdev, &regulator_list, list) {
+ struct regulator_ops *ops = rdev->desc->ops;
+
+ mutex_lock(&rdev->mutex);
+ if ((rdev->use_count > 0 || rdev->constraints->always_on) &&
+ ops->enable) {
+ error = ops->enable(rdev);
+ if (error)
+ ret = error;
+ } else {
+ if (!has_full_constraints)
+ goto unlock;
+ if (!ops->disable)
+ goto unlock;
+ if (ops->is_enabled && !ops->is_enabled(rdev))
+ goto unlock;
+
+ error = ops->disable(rdev);
+ if (error)
+ ret = error;
+ }
+unlock:
+ mutex_unlock(&rdev->mutex);
+ }
+ mutex_unlock(&regulator_list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_suspend_finish);
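
A sketch of the intended pairing from platform PM code; the hook name is illustrative and not part of the patch:

	static int example_platform_pm_enter(suspend_state_t state)
	{
		int ret = regulator_suspend_prepare(state);

		if (ret)
			return ret;

		/* ... enter and leave the low-power state ... */

		return regulator_suspend_finish();
	}
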
+
+/**
* regulator_has_full_constraints - the system has fully specified constraints
*
* Calling this function will cause the regulator API to disable all
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
new file mode 100644
index 00000000000..01ef7e9903b
--- /dev/null
+++ b/drivers/regulator/max8997.c
@@ -0,0 +1,1213 @@
+/*
+ * max8997.c - Regulator driver for the Maxim 8997/8966
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8998.c
+ */
+
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+
+struct max8997_data {
+ struct device *dev;
+ struct max8997_dev *iodev;
+ int num_regulators;
+ struct regulator_dev **rdev;
+ int ramp_delay; /* in mV/us */
+
+ u8 buck1_vol[8];
+ u8 buck2_vol[8];
+ u8 buck5_vol[8];
+ int buck125_gpioindex;
+
+ u8 saved_states[MAX8997_REG_MAX];
+};
+
+static inline void max8997_set_gpio(struct max8997_data *max8997)
+{
+ struct max8997_platform_data *pdata =
+ dev_get_platdata(max8997->iodev->dev);
+ int set3 = (max8997->buck125_gpioindex) & 0x1;
+ int set2 = ((max8997->buck125_gpioindex) >> 1) & 0x1;
+ int set1 = ((max8997->buck125_gpioindex) >> 2) & 0x1;
+
+ gpio_set_value(pdata->buck125_gpios[0], set1);
+ gpio_set_value(pdata->buck125_gpios[1], set2);
+ gpio_set_value(pdata->buck125_gpios[2], set3);
+}
+
+struct voltage_map_desc {
+ int min;
+ int max;
+ int step;
+ unsigned int n_bits;
+};
+
+/* Voltage maps in mV */
+static const struct voltage_map_desc ldo_voltage_map_desc = {
+ .min = 800, .max = 3950, .step = 50, .n_bits = 6,
+}; /* LDO1 ~ 18, 21 all */
+
+static const struct voltage_map_desc buck1245_voltage_map_desc = {
+ .min = 650, .max = 2225, .step = 25, .n_bits = 6,
+}; /* Buck1, 2, 4, 5 */
+
+static const struct voltage_map_desc buck37_voltage_map_desc = {
+ .min = 750, .max = 3900, .step = 50, .n_bits = 6,
+}; /* Buck3, 7 */
+
+/* current map in mA */
+static const struct voltage_map_desc charger_current_map_desc = {
+ .min = 200, .max = 950, .step = 50, .n_bits = 4,
+};
+
+static const struct voltage_map_desc topoff_current_map_desc = {
+ .min = 50, .max = 200, .step = 10, .n_bits = 4,
+};
+
+static const struct voltage_map_desc *reg_voltage_map[] = {
+ [MAX8997_LDO1] = &ldo_voltage_map_desc,
+ [MAX8997_LDO2] = &ldo_voltage_map_desc,
+ [MAX8997_LDO3] = &ldo_voltage_map_desc,
+ [MAX8997_LDO4] = &ldo_voltage_map_desc,
+ [MAX8997_LDO5] = &ldo_voltage_map_desc,
+ [MAX8997_LDO6] = &ldo_voltage_map_desc,
+ [MAX8997_LDO7] = &ldo_voltage_map_desc,
+ [MAX8997_LDO8] = &ldo_voltage_map_desc,
+ [MAX8997_LDO9] = &ldo_voltage_map_desc,
+ [MAX8997_LDO10] = &ldo_voltage_map_desc,
+ [MAX8997_LDO11] = &ldo_voltage_map_desc,
+ [MAX8997_LDO12] = &ldo_voltage_map_desc,
+ [MAX8997_LDO13] = &ldo_voltage_map_desc,
+ [MAX8997_LDO14] = &ldo_voltage_map_desc,
+ [MAX8997_LDO15] = &ldo_voltage_map_desc,
+ [MAX8997_LDO16] = &ldo_voltage_map_desc,
+ [MAX8997_LDO17] = &ldo_voltage_map_desc,
+ [MAX8997_LDO18] = &ldo_voltage_map_desc,
+ [MAX8997_LDO21] = &ldo_voltage_map_desc,
+ [MAX8997_BUCK1] = &buck1245_voltage_map_desc,
+ [MAX8997_BUCK2] = &buck1245_voltage_map_desc,
+ [MAX8997_BUCK3] = &buck37_voltage_map_desc,
+ [MAX8997_BUCK4] = &buck1245_voltage_map_desc,
+ [MAX8997_BUCK5] = &buck1245_voltage_map_desc,
+ [MAX8997_BUCK6] = NULL,
+ [MAX8997_BUCK7] = &buck37_voltage_map_desc,
+ [MAX8997_EN32KHZ_AP] = NULL,
+ [MAX8997_EN32KHZ_CP] = NULL,
+ [MAX8997_ENVICHG] = NULL,
+ [MAX8997_ESAFEOUT1] = NULL,
+ [MAX8997_ESAFEOUT2] = NULL,
+ [MAX8997_CHARGER_CV] = NULL,
+ [MAX8997_CHARGER] = &charger_current_map_desc,
+ [MAX8997_CHARGER_TOPOFF] = &topoff_current_map_desc,
+};
+
+static inline int max8997_get_rid(struct regulator_dev *rdev)
+{
+ return rdev_get_id(rdev);
+}
+
+static int max8997_list_voltage_safeout(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ int rid = max8997_get_rid(rdev);
+
+ if (rid == MAX8997_ESAFEOUT1 || rid == MAX8997_ESAFEOUT2) {
+ switch (selector) {
+ case 0:
+ return 4850000;
+ case 1:
+ return 4900000;
+ case 2:
+ return 4950000;
+ case 3:
+ return 3300000;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int max8997_list_voltage_charger_cv(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ int rid = max8997_get_rid(rdev);
+
+ if (rid != MAX8997_CHARGER_CV)
+ goto err;
+
+ switch (selector) {
+ case 0x00:
+ return 4200000;
+ case 0x01 ... 0x0E:
+ return 4000000 + 20000 * (selector - 0x01);
+ case 0x0F:
+ return 4350000;
+ default:
+ return -EINVAL;
+ }
+err:
+ return -EINVAL;
+}
+
+static int max8997_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ const struct voltage_map_desc *desc;
+ int rid = max8997_get_rid(rdev);
+ int val;
+
+ if (rid >= ARRAY_SIZE(reg_voltage_map) ||
+ rid < 0)
+ return -EINVAL;
+
+ desc = reg_voltage_map[rid];
+ if (desc == NULL)
+ return -EINVAL;
+
+ val = desc->min + desc->step * selector;
+ if (val > desc->max)
+ return -EINVAL;
+
+ return val * 1000;
+}
+
+static int max8997_get_enable_register(struct regulator_dev *rdev,
+ int *reg, int *mask, int *pattern)
+{
+ int rid = max8997_get_rid(rdev);
+
+ switch (rid) {
+ case MAX8997_LDO1 ... MAX8997_LDO21:
+ *reg = MAX8997_REG_LDO1CTRL + (rid - MAX8997_LDO1);
+ *mask = 0xC0;
+ *pattern = 0xC0;
+ break;
+ case MAX8997_BUCK1:
+ *reg = MAX8997_REG_BUCK1CTRL;
+ *mask = 0x01;
+ *pattern = 0x01;
+ break;
+ case MAX8997_BUCK2:
+ *reg = MAX8997_REG_BUCK2CTRL;
+ *mask = 0x01;
+ *pattern = 0x01;
+ break;
+ case MAX8997_BUCK3:
+ *reg = MAX8997_REG_BUCK3CTRL;
+ *mask = 0x01;
+ *pattern = 0x01;
+ break;
+ case MAX8997_BUCK4:
+ *reg = MAX8997_REG_BUCK4CTRL;
+ *mask = 0x01;
+ *pattern = 0x01;
+ break;
+ case MAX8997_BUCK5:
+ *reg = MAX8997_REG_BUCK5CTRL;
+ *mask = 0x01;
+ *pattern = 0x01;
+ break;
+ case MAX8997_BUCK6:
+ *reg = MAX8997_REG_BUCK6CTRL;
+ *mask = 0x01;
+ *pattern = 0x01;
+ break;
+ case MAX8997_BUCK7:
+ *reg = MAX8997_REG_BUCK7CTRL;
+ *mask = 0x01;
+ *pattern = 0x01;
+ break;
+ case MAX8997_EN32KHZ_AP ... MAX8997_EN32KHZ_CP:
+ *reg = MAX8997_REG_MAINCON1;
+ *mask = 0x01 << (rid - MAX8997_EN32KHZ_AP);
+ *pattern = 0x01 << (rid - MAX8997_EN32KHZ_AP);
+ break;
+ case MAX8997_ENVICHG:
+ *reg = MAX8997_REG_MBCCTRL1;
+ *mask = 0x80;
+ *pattern = 0x80;
+ break;
+ case MAX8997_ESAFEOUT1 ... MAX8997_ESAFEOUT2:
+ *reg = MAX8997_REG_SAFEOUTCTRL;
+ *mask = 0x40 << (rid - MAX8997_ESAFEOUT1);
+ *pattern = 0x40 << (rid - MAX8997_ESAFEOUT1);
+ break;
+ case MAX8997_CHARGER:
+ *reg = MAX8997_REG_MBCCTRL2;
+ *mask = 0x40;
+ *pattern = 0x40;
+ break;
+ default:
+ /* Not controllable or does not exist */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int max8997_reg_is_enabled(struct regulator_dev *rdev)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int ret, reg, mask, pattern;
+ u8 val;
+
+ ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
+ if (ret == -EINVAL)
+ return 1; /* "not controllable" */
+ else if (ret)
+ return ret;
+
+ ret = max8997_read_reg(i2c, reg, &val);
+ if (ret)
+ return ret;
+
+ return (val & mask) == pattern;
+}
+
+static int max8997_reg_enable(struct regulator_dev *rdev)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int ret, reg, mask, pattern;
+
+ ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
+ if (ret)
+ return ret;
+
+ return max8997_update_reg(i2c, reg, pattern, mask);
+}
+
+static int max8997_reg_disable(struct regulator_dev *rdev)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int ret, reg, mask, pattern;
+
+ ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
+ if (ret)
+ return ret;
+
+ return max8997_update_reg(i2c, reg, ~pattern, mask);
+}
+
+static int max8997_get_voltage_register(struct regulator_dev *rdev,
+ int *_reg, int *_shift, int *_mask)
+{
+ int rid = max8997_get_rid(rdev);
+ int reg, shift = 0, mask = 0x3f;
+
+ switch (rid) {
+ case MAX8997_LDO1 ... MAX8997_LDO21:
+ reg = MAX8997_REG_LDO1CTRL + (rid - MAX8997_LDO1);
+ break;
+ case MAX8997_BUCK1:
+ reg = MAX8997_REG_BUCK1DVS1;
+ break;
+ case MAX8997_BUCK2:
+ reg = MAX8997_REG_BUCK2DVS1;
+ break;
+ case MAX8997_BUCK3:
+ reg = MAX8997_REG_BUCK3DVS;
+ break;
+ case MAX8997_BUCK4:
+ reg = MAX8997_REG_BUCK4DVS;
+ break;
+ case MAX8997_BUCK5:
+ reg = MAX8997_REG_BUCK5DVS1;
+ break;
+ case MAX8997_BUCK7:
+ reg = MAX8997_REG_BUCK7DVS;
+ break;
+ case MAX8997_ESAFEOUT1 ... MAX8997_ESAFEOUT2:
+ reg = MAX8997_REG_SAFEOUTCTRL;
+ shift = (rid == MAX8997_ESAFEOUT2) ? 2 : 0;
+ mask = 0x3;
+ break;
+ case MAX8997_CHARGER_CV:
+ reg = MAX8997_REG_MBCCTRL3;
+ shift = 0;
+ mask = 0xf;
+ break;
+ case MAX8997_CHARGER:
+ reg = MAX8997_REG_MBCCTRL4;
+ shift = 0;
+ mask = 0xf;
+ break;
+ case MAX8997_CHARGER_TOPOFF:
+ reg = MAX8997_REG_MBCCTRL5;
+ shift = 0;
+ mask = 0xf;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *_reg = reg;
+ *_shift = shift;
+ *_mask = mask;
+
+ return 0;
+}
+
+static int max8997_get_voltage(struct regulator_dev *rdev)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct max8997_platform_data *pdata =
+ dev_get_platdata(max8997->iodev->dev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int reg, shift, mask, ret;
+ int rid = max8997_get_rid(rdev);
+ u8 val;
+
+ ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
+ if (ret)
+ return ret;
+
+ if ((rid == MAX8997_BUCK1 && pdata->buck1_gpiodvs) ||
+ (rid == MAX8997_BUCK2 && pdata->buck2_gpiodvs) ||
+ (rid == MAX8997_BUCK5 && pdata->buck5_gpiodvs))
+ reg += max8997->buck125_gpioindex;
+
+ ret = max8997_read_reg(i2c, reg, &val);
+ if (ret)
+ return ret;
+
+ val >>= shift;
+ val &= mask;
+
+ if (rdev->desc && rdev->desc->ops && rdev->desc->ops->list_voltage)
+ return rdev->desc->ops->list_voltage(rdev, val);
+
+ /*
+ * max8997_list_voltage returns value for any rdev with voltage_map,
+ * which works for "CHARGER" and "CHARGER TOPOFF" that do not have
+ * list_voltage ops (they are current regulators).
+ */
+ return max8997_list_voltage(rdev, val);
+}
+
+static inline int max8997_get_voltage_proper_val(
+ const struct voltage_map_desc *desc,
+ int min_vol, int max_vol)
+{
+ int i = 0;
+
+ if (desc == NULL)
+ return -EINVAL;
+
+ if (max_vol < desc->min || min_vol > desc->max)
+ return -EINVAL;
+
+ while (desc->min + desc->step * i < min_vol &&
+ desc->min + desc->step * i < desc->max)
+ i++;
+
+ if (desc->min + desc->step * i > max_vol)
+ return -EINVAL;
+
+ if (i >= (1 << desc->n_bits))
+ return -EINVAL;
+
+ return i;
+}
+
+static int max8997_set_voltage_charger_cv(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int rid = max8997_get_rid(rdev);
+ int lb, ub;
+ int reg, shift = 0, mask, ret = 0;
+ u8 val = 0x0;
+
+ if (rid != MAX8997_CHARGER_CV)
+ return -EINVAL;
+
+ ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
+ if (ret)
+ return ret;
+
+ if (max_uV < 4000000 || min_uV > 4350000)
+ return -EINVAL;
+
+ if (min_uV <= 4000000) {
+ if (max_uV < 4000000)
+ return -EINVAL;
+ else
+ val = 0x1;
+ } else if (min_uV <= 4200000 && max_uV >= 4200000)
+ val = 0x0;
+ else {
+ lb = (min_uV - 4000001) / 20000 + 2;
+ ub = (max_uV - 4000000) / 20000 + 1;
+
+ if (lb > ub)
+ return -EINVAL;
+
+ if (lb < 0xf)
+ val = lb;
+ else {
+ if (ub >= 0xf)
+ val = 0xf;
+ else
+ return -EINVAL;
+ }
+ }
+
+ *selector = val;
+
+ ret = max8997_update_reg(i2c, reg, val << shift, mask);
+
+ return ret;
+}
+
+/*
+ * For LDO1 ~ LDO21, BUCK1~5, BUCK7, CHARGER, CHARGER_TOPOFF
+ * BUCK1, 2, and 5 are handled here only when not under GPIO-DVS control.
+ */
+static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
+ const struct voltage_map_desc *desc;
+ int rid = max8997_get_rid(rdev);
+ int reg, shift = 0, mask, ret;
+ int i;
+ u8 org;
+
+ switch (rid) {
+ case MAX8997_LDO1 ... MAX8997_LDO21:
+ break;
+ case MAX8997_BUCK1 ... MAX8997_BUCK5:
+ break;
+ case MAX8997_BUCK6:
+ return -EINVAL;
+ case MAX8997_BUCK7:
+ break;
+ case MAX8997_CHARGER:
+ break;
+ case MAX8997_CHARGER_TOPOFF:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ desc = reg_voltage_map[rid];
+
+ i = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
+ if (i < 0)
+ return i;
+
+ ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
+ if (ret)
+ return ret;
+
+ max8997_read_reg(i2c, reg, &org);
+ org = (org & mask) >> shift;
+
+ ret = max8997_update_reg(i2c, reg, i << shift, mask << shift);
+ *selector = i;
+
+ if (rid == MAX8997_BUCK1 || rid == MAX8997_BUCK2 ||
+ rid == MAX8997_BUCK4 || rid == MAX8997_BUCK5) {
+ /* If the voltage is increasing */
+ if (org < i)
+ udelay(desc->step * (i - org) / max8997->ramp_delay);
+ }
+
+ return ret;
+}
+
+/*
+ * Assess the damage on the voltage setting of BUCK1,2,5 by the change.
+ *
+ * When GPIO-DVS mode is used for multiple bucks, changing the voltage value
+ * of one of the bucks may affect that of another buck, which is the side
+ * effect of the change (set_voltage). This function examines the GPIO-DVS
+ * configurations and checks whether such side-effect exists.
+ */
+static int max8997_assess_side_effect(struct regulator_dev *rdev,
+ u8 new_val, int *best)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct max8997_platform_data *pdata =
+ dev_get_platdata(max8997->iodev->dev);
+ int rid = max8997_get_rid(rdev);
+ u8 *buckx_val[3];
+ bool buckx_gpiodvs[3];
+ int side_effect[8];
+ int min_side_effect = INT_MAX;
+ int i;
+
+ *best = -1;
+
+ switch (rid) {
+ case MAX8997_BUCK1:
+ rid = 0;
+ break;
+ case MAX8997_BUCK2:
+ rid = 1;
+ break;
+ case MAX8997_BUCK5:
+ rid = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ buckx_val[0] = max8997->buck1_vol;
+ buckx_val[1] = max8997->buck2_vol;
+ buckx_val[2] = max8997->buck5_vol;
+ buckx_gpiodvs[0] = pdata->buck1_gpiodvs;
+ buckx_gpiodvs[1] = pdata->buck2_gpiodvs;
+ buckx_gpiodvs[2] = pdata->buck5_gpiodvs;
+
+ for (i = 0; i < 8; i++) {
+ int others;
+
+ if (new_val != (buckx_val[rid])[i]) {
+ side_effect[i] = -1;
+ continue;
+ }
+
+ side_effect[i] = 0;
+ for (others = 0; others < 3; others++) {
+ int diff;
+
+ if (others == rid)
+ continue;
+ if (buckx_gpiodvs[others] == false)
+ continue; /* Not affected */
+ diff = (buckx_val[others])[i] -
+ (buckx_val[others])[max8997->buck125_gpioindex];
+ if (diff > 0)
+ side_effect[i] += diff;
+ else if (diff < 0)
+ side_effect[i] -= diff;
+ }
+ if (side_effect[i] == 0) {
+ *best = i;
+ return 0; /* NO SIDE EFFECT! Use This! */
+ }
+ if (side_effect[i] < min_side_effect) {
+ min_side_effect = side_effect[i];
+ *best = i;
+ }
+ }
+
+ if (*best == -1)
+ return -EINVAL;
+
+ return side_effect[*best];
+}
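
Worked example (editor's illustration): assume BUCK1 and BUCK2 are both under GPIO-DVS, the current GPIO index is 0, and the requested BUCK1 selector is pre-programmed in DVS slots 1 and 4 of buck1_vol[]. If buck2_vol[4] equals buck2_vol[0] while buck2_vol[1] differs by two steps, the function reports a side effect of two for slot 1 and zero for slot 4, so the caller (max8997_set_voltage_buck(), below) switches the GPIOs to index 4 and BUCK2's output is left untouched.
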
+
+/*
+ * For Buck 1 ~ 5 and 7. If it is not controlled by GPIO, this calls
+ * max8997_set_voltage_ldobuck to do the job.
+ */
+static int max8997_set_voltage_buck(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct max8997_platform_data *pdata =
+ dev_get_platdata(max8997->iodev->dev);
+ int rid = max8997_get_rid(rdev);
+ const struct voltage_map_desc *desc;
+ int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
+ bool gpio_dvs_mode = false;
+ int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
+
+ if (rid < MAX8997_BUCK1 || rid > MAX8997_BUCK7)
+ return -EINVAL;
+
+ switch (rid) {
+ case MAX8997_BUCK1:
+ if (pdata->buck1_gpiodvs)
+ gpio_dvs_mode = true;
+ break;
+ case MAX8997_BUCK2:
+ if (pdata->buck2_gpiodvs)
+ gpio_dvs_mode = true;
+ break;
+ case MAX8997_BUCK5:
+ if (pdata->buck5_gpiodvs)
+ gpio_dvs_mode = true;
+ break;
+ }
+
+ if (!gpio_dvs_mode)
+ return max8997_set_voltage_ldobuck(rdev, min_uV, max_uV,
+ selector);
+
+ desc = reg_voltage_map[rid];
+ new_val = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
+ if (new_val < 0)
+ return new_val;
+
+ tmp_dmg = INT_MAX;
+ tmp_idx = -1;
+ tmp_val = -1;
+ do {
+ damage = max8997_assess_side_effect(rdev, new_val, &new_idx);
+ if (damage == 0)
+ goto out;
+
+ if (tmp_dmg > damage) {
+ tmp_idx = new_idx;
+ tmp_val = new_val;
+ tmp_dmg = damage;
+ }
+
+ new_val++;
+ } while (desc->min + desc->step * new_val <= desc->max);
+
+ new_idx = tmp_idx;
+ new_val = tmp_val;
+
+ if (pdata->ignore_gpiodvs_side_effect == false)
+ return -EINVAL;
+
+ dev_warn(&rdev->dev, "MAX8997 GPIO-DVS Side Effect Warning: GPIO SET:"
+ " %d -> %d\n", max8997->buck125_gpioindex, tmp_idx);
+
+out:
+ if (new_idx < 0 || new_val < 0)
+ return -EINVAL;
+
+ max8997->buck125_gpioindex = new_idx;
+ max8997_set_gpio(max8997);
+ *selector = new_val;
+
+ return 0;
+}
+
+static const int safeoutvolt[] = {
+ 3300000,
+ 4850000,
+ 4900000,
+ 4950000,
+};
+
+/* For SAFEOUT1 and SAFEOUT2 */
+static int max8997_set_voltage_safeout(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int rid = max8997_get_rid(rdev);
+ int reg, shift = 0, mask, ret;
+ int i = 0;
+ u8 val;
+
+ if (rid != MAX8997_ESAFEOUT1 && rid != MAX8997_ESAFEOUT2)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(safeoutvolt); i++) {
+ if (min_uV <= safeoutvolt[i] &&
+ max_uV >= safeoutvolt[i])
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(safeoutvolt))
+ return -EINVAL;
+
+ if (i == 0)
+ val = 0x3;
+ else
+ val = i - 1;
+
+ ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
+ if (ret)
+ return ret;
+
+ ret = max8997_update_reg(i2c, reg, val << shift, mask << shift);
+ *selector = val;
+
+ return ret;
+}
+
+static int max8997_reg_enable_suspend(struct regulator_dev *rdev)
+{
+ return 0;
+}
+
+static int max8997_reg_disable_suspend(struct regulator_dev *rdev)
+{
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+ int ret, reg, mask, pattern;
+ int rid = max8997_get_rid(rdev);
+
+ ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
+ if (ret)
+ return ret;
+
+ max8997_read_reg(i2c, reg, &max8997->saved_states[rid]);
+
+ if (rid == MAX8997_LDO1 ||
+ rid == MAX8997_LDO10 ||
+ rid == MAX8997_LDO21) {
+ dev_dbg(&rdev->dev, "Conditional Power-Off for %s\n",
+ rdev->desc->name);
+ return max8997_update_reg(i2c, reg, 0x40, mask);
+ }
+
+ dev_dbg(&rdev->dev, "Full Power-Off for %s (%xh -> %xh)\n",
+ rdev->desc->name, max8997->saved_states[rid] & mask,
+ (~pattern) & mask);
+ return max8997_update_reg(i2c, reg, ~pattern, mask);
+}
+
+static struct regulator_ops max8997_ldo_ops = {
+ .list_voltage = max8997_list_voltage,
+ .is_enabled = max8997_reg_is_enabled,
+ .enable = max8997_reg_enable,
+ .disable = max8997_reg_disable,
+ .get_voltage = max8997_get_voltage,
+ .set_voltage = max8997_set_voltage_ldobuck,
+ .set_suspend_enable = max8997_reg_enable_suspend,
+ .set_suspend_disable = max8997_reg_disable_suspend,
+};
+
+static struct regulator_ops max8997_buck_ops = {
+ .list_voltage = max8997_list_voltage,
+ .is_enabled = max8997_reg_is_enabled,
+ .enable = max8997_reg_enable,
+ .disable = max8997_reg_disable,
+ .get_voltage = max8997_get_voltage,
+ .set_voltage = max8997_set_voltage_buck,
+ .set_suspend_enable = max8997_reg_enable_suspend,
+ .set_suspend_disable = max8997_reg_disable_suspend,
+};
+
+static struct regulator_ops max8997_fixedvolt_ops = {
+ .list_voltage = max8997_list_voltage,
+ .is_enabled = max8997_reg_is_enabled,
+ .enable = max8997_reg_enable,
+ .disable = max8997_reg_disable,
+ .set_suspend_enable = max8997_reg_enable_suspend,
+ .set_suspend_disable = max8997_reg_disable_suspend,
+};
+
+static struct regulator_ops max8997_safeout_ops = {
+ .list_voltage = max8997_list_voltage_safeout,
+ .is_enabled = max8997_reg_is_enabled,
+ .enable = max8997_reg_enable,
+ .disable = max8997_reg_disable,
+ .get_voltage = max8997_get_voltage,
+ .set_voltage = max8997_set_voltage_safeout,
+ .set_suspend_enable = max8997_reg_enable_suspend,
+ .set_suspend_disable = max8997_reg_disable_suspend,
+};
+
+static struct regulator_ops max8997_fixedstate_ops = {
+ .list_voltage = max8997_list_voltage_charger_cv,
+ .get_voltage = max8997_get_voltage,
+ .set_voltage = max8997_set_voltage_charger_cv,
+};
+
+static int max8997_set_voltage_ldobuck_wrap(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ unsigned dummy;
+
+ return max8997_set_voltage_ldobuck(rdev, min_uV, max_uV, &dummy);
+}
+
+static struct regulator_ops max8997_charger_ops = {
+ .is_enabled = max8997_reg_is_enabled,
+ .enable = max8997_reg_enable,
+ .disable = max8997_reg_disable,
+ .get_current_limit = max8997_get_voltage,
+ .set_current_limit = max8997_set_voltage_ldobuck_wrap,
+};
+
+static struct regulator_ops max8997_charger_fixedstate_ops = {
+ .is_enabled = max8997_reg_is_enabled,
+ .get_current_limit = max8997_get_voltage,
+ .set_current_limit = max8997_set_voltage_ldobuck_wrap,
+};
+
+#define regulator_desc_ldo(num) { \
+ .name = "LDO"#num, \
+ .id = MAX8997_LDO##num, \
+ .ops = &max8997_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+}
+#define regulator_desc_buck(num) { \
+ .name = "BUCK"#num, \
+ .id = MAX8997_BUCK##num, \
+ .ops = &max8997_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+}
+
+static struct regulator_desc regulators[] = {
+ regulator_desc_ldo(1),
+ regulator_desc_ldo(2),
+ regulator_desc_ldo(3),
+ regulator_desc_ldo(4),
+ regulator_desc_ldo(5),
+ regulator_desc_ldo(6),
+ regulator_desc_ldo(7),
+ regulator_desc_ldo(8),
+ regulator_desc_ldo(9),
+ regulator_desc_ldo(10),
+ regulator_desc_ldo(11),
+ regulator_desc_ldo(12),
+ regulator_desc_ldo(13),
+ regulator_desc_ldo(14),
+ regulator_desc_ldo(15),
+ regulator_desc_ldo(16),
+ regulator_desc_ldo(17),
+ regulator_desc_ldo(18),
+ regulator_desc_ldo(21),
+ regulator_desc_buck(1),
+ regulator_desc_buck(2),
+ regulator_desc_buck(3),
+ regulator_desc_buck(4),
+ regulator_desc_buck(5),
+ {
+ .name = "BUCK6",
+ .id = MAX8997_BUCK6,
+ .ops = &max8997_fixedvolt_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ regulator_desc_buck(7),
+ {
+ .name = "EN32KHz AP",
+ .id = MAX8997_EN32KHZ_AP,
+ .ops = &max8997_fixedvolt_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "EN32KHz CP",
+ .id = MAX8997_EN32KHZ_CP,
+ .ops = &max8997_fixedvolt_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "ENVICHG",
+ .id = MAX8997_ENVICHG,
+ .ops = &max8997_fixedvolt_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "ESAFEOUT1",
+ .id = MAX8997_ESAFEOUT1,
+ .ops = &max8997_safeout_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "ESAFEOUT2",
+ .id = MAX8997_ESAFEOUT2,
+ .ops = &max8997_safeout_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "CHARGER CV",
+ .id = MAX8997_CHARGER_CV,
+ .ops = &max8997_fixedstate_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "CHARGER",
+ .id = MAX8997_CHARGER,
+ .ops = &max8997_charger_ops,
+ .type = REGULATOR_CURRENT,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "CHARGER TOPOFF",
+ .id = MAX8997_CHARGER_TOPOFF,
+ .ops = &max8997_charger_fixedstate_ops,
+ .type = REGULATOR_CURRENT,
+ .owner = THIS_MODULE,
+ },
+};
+
+static __devinit int max8997_pmic_probe(struct platform_device *pdev)
+{
+ struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct regulator_dev **rdev;
+ struct max8997_data *max8997;
+ struct i2c_client *i2c;
+ int i, ret, size;
+ u8 max_buck1 = 0, max_buck2 = 0, max_buck5 = 0;
+
+ if (!pdata) {
+ dev_err(pdev->dev.parent, "No platform init data supplied.\n");
+ return -ENODEV;
+ }
+
+ max8997 = kzalloc(sizeof(struct max8997_data), GFP_KERNEL);
+ if (!max8997)
+ return -ENOMEM;
+
+ size = sizeof(struct regulator_dev *) * pdata->num_regulators;
+ max8997->rdev = kzalloc(size, GFP_KERNEL);
+ if (!max8997->rdev) {
+ kfree(max8997);
+ return -ENOMEM;
+ }
+
+ rdev = max8997->rdev;
+ max8997->dev = &pdev->dev;
+ max8997->iodev = iodev;
+ max8997->num_regulators = pdata->num_regulators;
+ platform_set_drvdata(pdev, max8997);
+ i2c = max8997->iodev->i2c;
+
+ max8997->buck125_gpioindex = pdata->buck125_default_idx;
+
+ for (i = 0; i < 8; i++) {
+ max8997->buck1_vol[i] = ret =
+ max8997_get_voltage_proper_val(
+ &buck1245_voltage_map_desc,
+ pdata->buck1_voltage[i] / 1000,
+ pdata->buck1_voltage[i] / 1000 +
+ buck1245_voltage_map_desc.step);
+ if (ret < 0)
+ goto err_alloc;
+
+ max8997->buck2_vol[i] = ret =
+ max8997_get_voltage_proper_val(
+ &buck1245_voltage_map_desc,
+ pdata->buck2_voltage[i] / 1000,
+ pdata->buck2_voltage[i] / 1000 +
+ buck1245_voltage_map_desc.step);
+ if (ret < 0)
+ goto err_alloc;
+
+ max8997->buck5_vol[i] = ret =
+ max8997_get_voltage_proper_val(
+ &buck1245_voltage_map_desc,
+ pdata->buck5_voltage[i] / 1000,
+ pdata->buck5_voltage[i] / 1000 +
+ buck1245_voltage_map_desc.step);
+ if (ret < 0)
+ goto err_alloc;
+
+ if (max_buck1 < max8997->buck1_vol[i])
+ max_buck1 = max8997->buck1_vol[i];
+ if (max_buck2 < max8997->buck2_vol[i])
+ max_buck2 = max8997->buck2_vol[i];
+ if (max_buck5 < max8997->buck5_vol[i])
+ max_buck5 = max8997->buck5_vol[i];
+ }
+
+ /* For safety, program the maximum voltage before setting up the DVS tables */
+ for (i = 0; i < 8; i++) {
+ max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1),
+ max_buck1, 0x3f);
+ max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1),
+ max_buck2, 0x3f);
+ max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1),
+ max_buck5, 0x3f);
+ }
+
+ /*
+ * If none of buck 1, 2, and 5 uses GPIO-DVS, skip the GPIO setup.
+ * If at least one of them does, request and configure the GPIOs.
+ */
+ if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
+ pdata->buck5_gpiodvs) {
+ bool gpio1set = false, gpio2set = false;
+
+ if (!gpio_is_valid(pdata->buck125_gpios[0]) ||
+ !gpio_is_valid(pdata->buck125_gpios[1]) ||
+ !gpio_is_valid(pdata->buck125_gpios[2])) {
+ dev_err(&pdev->dev, "GPIO NOT VALID\n");
+ ret = -EINVAL;
+ goto err_alloc;
+ }
+
+ ret = gpio_request(pdata->buck125_gpios[0],
+ "MAX8997 SET1");
+ if (ret == -EBUSY)
+ dev_warn(&pdev->dev, "Duplicated gpio request"
+ " on SET1\n");
+ else if (ret)
+ goto err_alloc;
+ else
+ gpio1set = true;
+
+ ret = gpio_request(pdata->buck125_gpios[1],
+ "MAX8997 SET2");
+ if (ret == -EBUSY)
+ dev_warn(&pdev->dev, "Duplicated gpio request"
+ " on SET2\n");
+ else if (ret) {
+ if (gpio1set)
+ gpio_free(pdata->buck125_gpios[0]);
+ goto err_alloc;
+ } else
+ gpio2set = true;
+
+ ret = gpio_request(pdata->buck125_gpios[2],
+ "MAX8997 SET3");
+ if (ret == -EBUSY)
+ dev_warn(&pdev->dev, "Duplicated gpio request"
+ " on SET3\n");
+ else if (ret) {
+ if (gpio1set)
+ gpio_free(pdata->buck125_gpios[0]);
+ if (gpio2set)
+ gpio_free(pdata->buck125_gpios[1]);
+ goto err_alloc;
+ }
+
+ gpio_direction_output(pdata->buck125_gpios[0],
+ (max8997->buck125_gpioindex >> 2)
+ & 0x1); /* SET1 */
+ gpio_direction_output(pdata->buck125_gpios[1],
+ (max8997->buck125_gpioindex >> 1)
+ & 0x1); /* SET2 */
+ gpio_direction_output(pdata->buck125_gpios[2],
+ (max8997->buck125_gpioindex >> 0)
+ & 0x1); /* SET3 */
+ ret = 0;
+ }
+
+ /* Enable GPIO-DVS for BUCK1/2/5 only where the platform data requests it */
+ max8997_update_reg(i2c, MAX8997_REG_BUCK1CTRL, (pdata->buck1_gpiodvs) ?
+ (1 << 1) : (0 << 1), 1 << 1);
+ max8997_update_reg(i2c, MAX8997_REG_BUCK2CTRL, (pdata->buck2_gpiodvs) ?
+ (1 << 1) : (0 << 1), 1 << 1);
+ max8997_update_reg(i2c, MAX8997_REG_BUCK5CTRL, (pdata->buck5_gpiodvs) ?
+ (1 << 1) : (0 << 1), 1 << 1);
+
+ /* Initialize all the DVS related BUCK registers */
+ for (i = 0; i < 8; i++) {
+ max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1),
+ max8997->buck1_vol[i],
+ 0x3f);
+ max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1),
+ max8997->buck2_vol[i],
+ 0x3f);
+ max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1),
+ max8997->buck5_vol[i],
+ 0x3f);
+ }
+
+ for (i = 0; i < pdata->num_regulators; i++) {
+ const struct voltage_map_desc *desc;
+ int id = pdata->regulators[i].id;
+
+ desc = reg_voltage_map[id];
+ if (desc)
+ regulators[id].n_voltages =
+ (desc->max - desc->min) / desc->step + 1;
+ else if (id == MAX8997_ESAFEOUT1 || id == MAX8997_ESAFEOUT2)
+ regulators[id].n_voltages = 4;
+ else if (id == MAX8997_CHARGER_CV)
+ regulators[id].n_voltages = 16;
+
+ rdev[i] = regulator_register(&regulators[id], max8997->dev,
+ pdata->regulators[i].initdata, max8997);
+ if (IS_ERR(rdev[i])) {
+ ret = PTR_ERR(rdev[i]);
+ dev_err(max8997->dev, "regulator init failed for %d\n",
+ id);
+ rdev[i] = NULL;
+ goto err;
+ }
+ }
+
+ /* Misc Settings */
+ max8997->ramp_delay = 10; /* set 10mV/us, which is the default */
+ max8997_write_reg(i2c, MAX8997_REG_BUCKRAMP, (0xf << 4) | 0x9);
+
+ return 0;
+err:
+ for (i = 0; i < max8997->num_regulators; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+err_alloc:
+ kfree(max8997->rdev);
+ kfree(max8997);
+
+ return ret;
+}
+
+static int __devexit max8997_pmic_remove(struct platform_device *pdev)
+{
+ struct max8997_data *max8997 = platform_get_drvdata(pdev);
+ struct regulator_dev **rdev = max8997->rdev;
+ int i;
+
+ for (i = 0; i < max8997->num_regulators; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+
+ kfree(max8997->rdev);
+ kfree(max8997);
+
+ return 0;
+}
+
+static const struct platform_device_id max8997_pmic_id[] = {
+ { "max8997-pmic", 0},
+ { },
+};
+
+static struct platform_driver max8997_pmic_driver = {
+ .driver = {
+ .name = "max8997-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8997_pmic_probe,
+ .remove = __devexit_p(max8997_pmic_remove),
+ .id_table = max8997_pmic_id,
+};
+
+static int __init max8997_pmic_init(void)
+{
+ return platform_driver_register(&max8997_pmic_driver);
+}
+subsys_initcall(max8997_pmic_init);
+
+static void __exit max8997_pmic_cleanup(void)
+{
+ platform_driver_unregister(&max8997_pmic_driver);
+}
+module_exit(max8997_pmic_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 8997/8966 Regulator Driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
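
A hedged board-data sketch for the GPIO-DVS path exercised above. The field names follow their use in max8997_pmic_probe(); the regulator entry struct name and the GPIO macros are assumptions:

	static struct regulator_init_data board_buck1_data = {
		.constraints = {
			.name		= "vdd_arm",
			.min_uV		= 950000,
			.max_uV		= 1350000,
			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
		},
	};

	static struct max8997_regulator_data board_max8997_regulators[] = {
		{ .id = MAX8997_BUCK1, .initdata = &board_buck1_data },
	};

	static struct max8997_platform_data board_max8997_pdata = {
		.num_regulators	= ARRAY_SIZE(board_max8997_regulators),
		.regulators	= board_max8997_regulators,
		.buck1_gpiodvs	= true,
		.buck125_default_idx = 0,
		.buck125_gpios	= { GPIO_DVS_SET1, GPIO_DVS_SET2, GPIO_DVS_SET3 },
		.buck1_voltage	= { 1350000, 1300000, 1250000, 1200000,
				    1150000, 1100000, 1050000, 1000000 },
		/* buck2_voltage[] and buck5_voltage[] must also hold valid
		 * values, since the probe validates all three tables;
		 * omitted here for brevity. */
		.ignore_gpiodvs_side_effect = true,
	};
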
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 3e5d0c3b4e5..23249cb0a8b 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -15,6 +15,7 @@
#include <linux/regulator/driver.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -336,8 +337,7 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv;
struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
- struct mc13783_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ struct mc13783_regulator_platform_data *pdata = mfd_get_data(pdev);
struct mc13783_regulator_init_data *init_data;
int i, ret;
@@ -381,8 +381,7 @@ err:
static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- struct mc13783_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ struct mc13783_regulator_platform_data *pdata = mfd_get_data(pdev);
int i;
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 1b8f7398a4a..6f15168e5ed 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -15,6 +15,7 @@
#include <linux/regulator/driver.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
+#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -520,8 +521,7 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv;
struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
- struct mc13xxx_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ struct mc13xxx_regulator_platform_data *pdata = mfd_get_data(pdev);
struct mc13xxx_regulator_init_data *init_data;
int i, ret;
u32 val;
@@ -595,8 +595,7 @@ err_free:
static int __devexit mc13892_regulator_remove(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- struct mc13xxx_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ struct mc13xxx_regulator_platform_data *pdata = mfd_get_data(pdev);
int i;
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
new file mode 100644
index 00000000000..1661499feda
--- /dev/null
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -0,0 +1,196 @@
+/*
+ * Driver for TPS61050/61052 boost converters, typically used for white LEDs
+ * or audio amplifiers.
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps6105x.h>
+
+static const int tps6105x_voltages[] = {
+ 4500000,
+ 5000000,
+ 5250000,
+ 5000000, /* There is an additional 5V */
+};
+
+static int tps6105x_regulator_enable(struct regulator_dev *rdev)
+{
+ struct tps6105x *tps6105x = rdev_get_drvdata(rdev);
+ int ret;
+
+ /* Activate voltage mode */
+ ret = tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0,
+ TPS6105X_REG0_MODE_MASK,
+ TPS6105X_REG0_MODE_VOLTAGE << TPS6105X_REG0_MODE_SHIFT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6105x_regulator_disable(struct regulator_dev *rdev)
+{
+ struct tps6105x *tps6105x = rdev_get_drvdata(rdev);
+ int ret;
+
+ /* Set into shutdown mode */
+ ret = tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0,
+ TPS6105X_REG0_MODE_MASK,
+ TPS6105X_REG0_MODE_SHUTDOWN << TPS6105X_REG0_MODE_SHIFT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6105x_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct tps6105x *tps6105x = rdev_get_drvdata(rdev);
+ u8 regval;
+ int ret;
+
+ ret = tps6105x_get(tps6105x, TPS6105X_REG_0, &regval);
+ if (ret)
+ return ret;
+ regval &= TPS6105X_REG0_MODE_MASK;
+ regval >>= TPS6105X_REG0_MODE_SHIFT;
+
+ if (regval == TPS6105X_REG0_MODE_VOLTAGE)
+ return 1;
+
+ return 0;
+}
+
+static int tps6105x_regulator_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct tps6105x *tps6105x = rdev_get_drvdata(rdev);
+ u8 regval;
+ int ret;
+
+ ret = tps6105x_get(tps6105x, TPS6105X_REG_0, &regval);
+ if (ret)
+ return ret;
+
+ regval &= TPS6105X_REG0_VOLTAGE_MASK;
+ regval >>= TPS6105X_REG0_VOLTAGE_SHIFT;
+ return (int) regval;
+}
+
+static int tps6105x_regulator_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct tps6105x *tps6105x = rdev_get_drvdata(rdev);
+ int ret;
+
+ ret = tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0,
+ TPS6105X_REG0_VOLTAGE_MASK,
+ selector << TPS6105X_REG0_VOLTAGE_SHIFT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6105x_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ if (selector >= ARRAY_SIZE(tps6105x_voltages))
+ return -EINVAL;
+
+ return tps6105x_voltages[selector];
+}
+
+static struct regulator_ops tps6105x_regulator_ops = {
+ .enable = tps6105x_regulator_enable,
+ .disable = tps6105x_regulator_disable,
+ .is_enabled = tps6105x_regulator_is_enabled,
+ .get_voltage_sel = tps6105x_regulator_get_voltage_sel,
+ .set_voltage_sel = tps6105x_regulator_set_voltage_sel,
+ .list_voltage = tps6105x_regulator_list_voltage,
+};
+
+static struct regulator_desc tps6105x_regulator_desc = {
+ .name = "tps6105x-boost",
+ .ops = &tps6105x_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = 0,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(tps6105x_voltages),
+};
+
+/*
+ * Registers the chip as a voltage regulator
+ */
+static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
+{
+ struct tps6105x *tps6105x = mfd_get_data(pdev);
+ struct tps6105x_platform_data *pdata = tps6105x->pdata;
+ int ret;
+
+ /* This instance is not set for regulator mode so bail out */
+ if (pdata->mode != TPS6105X_MODE_VOLTAGE) {
+ dev_info(&pdev->dev,
+ "chip not in voltage mode mode, exit probe \n");
+ return 0;
+ }
+
+ /* Register regulator with framework */
+ tps6105x->regulator = regulator_register(&tps6105x_regulator_desc,
+ &tps6105x->client->dev,
+ pdata->regulator_data, tps6105x);
+ if (IS_ERR(tps6105x->regulator)) {
+ ret = PTR_ERR(tps6105x->regulator);
+ dev_err(&tps6105x->client->dev,
+ "failed to register regulator\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devexit tps6105x_regulator_remove(struct platform_device *pdev)
+{
+ struct tps6105x *tps6105x = platform_get_drvdata(pdev);
+ regulator_unregister(tps6105x->regulator);
+ return 0;
+}
+
+static struct platform_driver tps6105x_regulator_driver = {
+ .driver = {
+ .name = "tps6105x-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6105x_regulator_probe,
+ .remove = __devexit_p(tps6105x_regulator_remove),
+};
+
+static __init int tps6105x_regulator_init(void)
+{
+ return platform_driver_register(&tps6105x_regulator_driver);
+}
+subsys_initcall(tps6105x_regulator_init);
+
+static __exit void tps6105x_regulator_exit(void)
+{
+ platform_driver_unregister(&tps6105x_regulator_driver);
+}
+module_exit(tps6105x_regulator_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("TPS6105x regulator driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps6105x-regulator");
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index bd332cf1cc3..6a292852a35 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -475,6 +475,13 @@ static struct regulator_ops twlfixed_ops = {
.get_status = twlreg_get_status,
};
+static struct regulator_ops twl6030_fixed_resource = {
+ .enable = twlreg_enable,
+ .disable = twlreg_disable,
+ .is_enabled = twlreg_is_enabled,
+ .get_status = twlreg_get_status,
+};
+
/*----------------------------------------------------------------------*/
#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
@@ -538,6 +545,20 @@ static struct regulator_ops twlfixed_ops = {
}, \
}
+#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay, remap_conf) { \
+ .base = offset, \
+ .id = num, \
+ .delay = turnon_delay, \
+ .remap = remap_conf, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6030_REG_##label, \
+ .ops = &twl6030_fixed_resource, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
/*
* We list regulators here if systems need some level of
* software control over them after boot.
@@ -577,7 +598,8 @@ static struct twlreg_info twl_regs[] = {
TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21),
TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21),
TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21),
- TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21)
+ TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21),
+ TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0, 0x21),
};
static int __devinit twlreg_probe(struct platform_device *pdev)
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 06df898842c..857d741539d 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -565,9 +565,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
}
irq = platform_get_irq_byname(pdev, "UV");
- ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq,
- IRQF_TRIGGER_RISING, dcdc->name,
- dcdc);
+ ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
+ IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
@@ -575,9 +574,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
}
irq = platform_get_irq_byname(pdev, "HC");
- ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_oc_irq,
- IRQF_TRIGGER_RISING, dcdc->name,
- dcdc);
+ ret = request_threaded_irq(irq, NULL, wm831x_dcdc_oc_irq,
+ IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request HC IRQ %d: %d\n",
irq, ret);
@@ -589,7 +587,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
return 0;
err_uv:
- wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
err_regulator:
regulator_unregister(dcdc->regulator);
err:
@@ -756,9 +754,8 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
}
irq = platform_get_irq_byname(pdev, "UV");
- ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq,
- IRQF_TRIGGER_RISING, dcdc->name,
- dcdc);
+ ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
+ IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
@@ -885,9 +882,9 @@ static __devinit int wm831x_boostp_probe(struct platform_device *pdev)
}
irq = platform_get_irq_byname(pdev, "UV");
- ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq,
- IRQF_TRIGGER_RISING, dcdc->name,
- dcdc);
+ ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
+ IRQF_TRIGGER_RISING, dcdc->name,
+ dcdc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
@@ -908,11 +905,10 @@ err:
static __devexit int wm831x_boostp_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
- struct wm831x *wm831x = dcdc->wm831x;
platform_set_drvdata(pdev, NULL);
- wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
kfree(dcdc);
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 6c446cd6ad5..01f27c7f423 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -198,9 +198,8 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- ret = wm831x_request_irq(wm831x, irq, wm831x_isink_irq,
- IRQF_TRIGGER_RISING, isink->name,
- isink);
+ ret = request_threaded_irq(irq, NULL, wm831x_isink_irq,
+ IRQF_TRIGGER_RISING, isink->name, isink);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request ISINK IRQ %d: %d\n",
irq, ret);
@@ -221,11 +220,10 @@ err:
static __devexit int wm831x_isink_remove(struct platform_device *pdev)
{
struct wm831x_isink *isink = platform_get_drvdata(pdev);
- struct wm831x *wm831x = isink->wm831x;
platform_set_drvdata(pdev, NULL);
- wm831x_free_irq(wm831x, platform_get_irq(pdev, 0), isink);
+ free_irq(platform_get_irq(pdev, 0), isink);
regulator_unregister(isink->regulator);
kfree(isink);
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index c94fc5b7cd5..2220cf8defb 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -354,9 +354,9 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
}
irq = platform_get_irq_byname(pdev, "UV");
- ret = wm831x_request_irq(wm831x, irq, wm831x_ldo_uv_irq,
- IRQF_TRIGGER_RISING, ldo->name,
- ldo);
+ ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
+ IRQF_TRIGGER_RISING, ldo->name,
+ ldo);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
@@ -377,11 +377,10 @@ err:
static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
{
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
- struct wm831x *wm831x = ldo->wm831x;
platform_set_drvdata(pdev, NULL);
- wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo);
+ free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
regulator_unregister(ldo->regulator);
kfree(ldo);
@@ -619,9 +618,8 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
}
irq = platform_get_irq_byname(pdev, "UV");
- ret = wm831x_request_irq(wm831x, irq, wm831x_ldo_uv_irq,
- IRQF_TRIGGER_RISING, ldo->name,
- ldo);
+ ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
+ IRQF_TRIGGER_RISING, ldo->name, ldo);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
@@ -642,9 +640,8 @@ err:
static __devexit int wm831x_aldo_remove(struct platform_device *pdev)
{
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
- struct wm831x *wm831x = ldo->wm831x;
- wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo);
+ free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
regulator_unregister(ldo->regulator);
kfree(ldo);
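The wm831x-dcdc, wm831x-isink and wm831x-ldo hunks above all make the same conversion: the driver-private wm831x_request_irq()/wm831x_free_irq() wrappers are replaced with the generic threaded-IRQ API. In isolation the pattern looks like the sketch below; the handler and names are placeholders, and depending on how the line is driven IRQF_ONESHOT may also be required when no primary handler is supplied.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Illustrative handler and device names only. */
static irqreturn_t example_uv_irq(int irq, void *data)
{
	/* runs in a kernel thread, so it may sleep (e.g. for I2C/SPI reads) */
	return IRQ_HANDLED;
}

static int example_request(struct platform_device *pdev, void *priv)
{
	int irq = platform_get_irq_byname(pdev, "UV");

	if (irq < 0)
		return irq;
	/* NULL primary handler: the core wakes the handler thread directly */
	return request_threaded_irq(irq, NULL, example_uv_irq,
				    IRQF_TRIGGER_RISING, "example-uv", priv);
}

static void example_free(struct platform_device *pdev, void *priv)
{
	/* teardown mirrors the remove() paths above */
	free_irq(platform_get_irq_byname(pdev, "UV"), priv);
}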
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index bbdb2f02798..baff724cd40 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -103,7 +103,7 @@ static const struct rtc_class_ops pl030_ops = {
.set_alarm = pl030_set_alarm,
};
-static int pl030_probe(struct amba_device *dev, struct amba_id *id)
+static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
{
struct pl030_rtc *rtc;
int ret;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index b7a6690e5b3..6568f4b37e1 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -358,7 +358,7 @@ static int pl031_remove(struct amba_device *adev)
return 0;
}
-static int pl031_probe(struct amba_device *adev, struct amba_id *id)
+static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
struct pl031_local *ldata;
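The rtc-pl030/pl031 hunks here, and the amba-pl022 and amba-pl010 ones further down, all track the same AMBA bus API change: the id table entry passed to probe() is now const. A minimal driver skeleton against the new prototype, with hypothetical names and ID values, looks like this.

#include <linux/amba/bus.h>

static int example_amba_probe(struct amba_device *adev,
			      const struct amba_id *id)	/* note the const */
{
	/* bind to adev->res and adev->irq[] as usual */
	return 0;
}

static int example_amba_remove(struct amba_device *adev)
{
	return 0;
}

static struct amba_id example_amba_ids[] = {
	{ .id = 0x00041030, .mask = 0x000fffff },	/* hypothetical PrimeCell ID */
	{ },
};

static struct amba_driver example_amba_driver = {
	.drv		= { .name = "example-amba" },
	.id_table	= example_amba_ids,
	.probe		= example_amba_probe,
	.remove		= example_amba_remove,
};
/* registered with amba_driver_register(&example_amba_driver) as before */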
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index bb233a9cbad..7b90fc361b5 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -161,13 +161,13 @@ config SPI_IMX_VER_0_0
def_bool y if SOC_IMX21 || SOC_IMX27
config SPI_IMX_VER_0_4
- def_bool y if ARCH_MX31
+ def_bool y if SOC_IMX31
config SPI_IMX_VER_0_7
- def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 || ARCH_MX53
+ def_bool y if ARCH_MX25 || SOC_IMX35 || SOC_IMX51 || SOC_IMX53
config SPI_IMX_VER_2_3
- def_bool y if ARCH_MX51 || ARCH_MX53
+ def_bool y if SOC_IMX51 || SOC_IMX53
config SPI_IMX
tristate "Freescale i.MX SPI controllers"
@@ -350,6 +350,16 @@ config SPI_TEGRA
help
SPI driver for NVidia Tegra SoCs
+config SPI_TI_SSP
+ tristate "TI Sequencer Serial Port - SPI Support"
+ depends on MFD_TI_SSP
+ help
+ This selects an SPI master implementation using a TI sequencer
+ serial port.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ti-ssp-spi.
+
config SPI_TOPCLIFF_PCH
tristate "Topcliff PCH SPI Controller"
depends on PCI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 86d1b5f9bbd..f3f31d98835 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o
obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o
obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o
+obj-$(CONFIG_SPI_TI_SSP) += ti-ssp-spi.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o
obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 71a1219a995..95e58c70a2c 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -2021,7 +2021,7 @@ static void pl022_cleanup(struct spi_device *spi)
static int __devinit
-pl022_probe(struct amba_device *adev, struct amba_id *id)
+pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c
index 6beab99bf95..166a879fd9e 100644
--- a/drivers/spi/davinci_spi.c
+++ b/drivers/spi/davinci_spi.c
@@ -790,7 +790,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
struct resource *r, *mem;
resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
- resource_size_t dma_eventq = SPI_NO_RESOURCE;
int i = 0, ret = 0;
u32 spipc0;
@@ -878,17 +877,13 @@ static int davinci_spi_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (r)
dma_tx_chan = r->start;
- r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
- if (r)
- dma_eventq = r->start;
dspi->bitbang.txrx_bufs = davinci_spi_bufs;
if (dma_rx_chan != SPI_NO_RESOURCE &&
- dma_tx_chan != SPI_NO_RESOURCE &&
- dma_eventq != SPI_NO_RESOURCE) {
+ dma_tx_chan != SPI_NO_RESOURCE) {
dspi->dma.rx_channel = dma_rx_chan;
dspi->dma.tx_channel = dma_tx_chan;
- dspi->dma.eventq = dma_eventq;
+ dspi->dma.eventq = pdata->dma_event_q;
ret = davinci_spi_request_dma(dspi);
if (ret)
@@ -897,7 +892,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "DMA: supported\n");
dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, "
"event queue: %d\n", dma_rx_chan, dma_tx_chan,
- dma_eventq);
+ pdata->dma_event_q);
}
dspi->get_rx = davinci_spi_rx_buf_u8;
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index abb1ffbf3d2..36501adc125 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2005, 2006 Nokia Corporation
* Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
- * Juha Yrjölä <juha.yrjola@nokia.com>
+ * Juha Yrjölä <juha.yrjola@nokia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -33,6 +33,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -46,7 +47,6 @@
#define OMAP2_MCSPI_MAX_CTRL 4
#define OMAP2_MCSPI_REVISION 0x00
-#define OMAP2_MCSPI_SYSCONFIG 0x10
#define OMAP2_MCSPI_SYSSTATUS 0x14
#define OMAP2_MCSPI_IRQSTATUS 0x18
#define OMAP2_MCSPI_IRQENABLE 0x1c
@@ -63,13 +63,6 @@
/* per-register bitmasks: */
-#define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE BIT(4)
-#define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP BIT(2)
-#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE BIT(0)
-#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET BIT(1)
-
-#define OMAP2_MCSPI_SYSSTATUS_RESETDONE BIT(0)
-
#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0)
#define OMAP2_MCSPI_MODULCTRL_MS BIT(2)
#define OMAP2_MCSPI_MODULCTRL_STEST BIT(3)
@@ -122,13 +115,12 @@ struct omap2_mcspi {
spinlock_t lock;
struct list_head msg_queue;
struct spi_master *master;
- struct clk *ick;
- struct clk *fck;
/* Virtual base address of the controller */
void __iomem *base;
unsigned long phys;
/* SPI1 has 4 channels, while SPI2 has 2 */
struct omap2_mcspi_dma *dma_channels;
+ struct device *dev;
};
struct omap2_mcspi_cs {
@@ -144,7 +136,6 @@ struct omap2_mcspi_cs {
* corresponding registers are modified.
*/
struct omap2_mcspi_regs {
- u32 sysconfig;
u32 modulctrl;
u32 wakeupenable;
struct list_head cs;
@@ -268,9 +259,6 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL,
omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl);
- mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_SYSCONFIG,
- omap2_mcspi_ctx[spi_cntrl->bus_num - 1].sysconfig);
-
mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE,
omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable);
@@ -280,20 +268,12 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
}
static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
{
- clk_disable(mcspi->ick);
- clk_disable(mcspi->fck);
+ pm_runtime_put_sync(mcspi->dev);
}
static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
{
- if (clk_enable(mcspi->ick))
- return -ENODEV;
- if (clk_enable(mcspi->fck))
- return -ENODEV;
-
- omap2_mcspi_restore_ctx(mcspi);
-
- return 0;
+ return pm_runtime_get_sync(mcspi->dev);
}
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
@@ -819,8 +799,9 @@ static int omap2_mcspi_setup(struct spi_device *spi)
return ret;
}
- if (omap2_mcspi_enable_clocks(mcspi))
- return -ENODEV;
+ ret = omap2_mcspi_enable_clocks(mcspi);
+ if (ret < 0)
+ return ret;
ret = omap2_mcspi_setup_transfer(spi, NULL);
omap2_mcspi_disable_clocks(mcspi);
@@ -863,10 +844,11 @@ static void omap2_mcspi_work(struct work_struct *work)
struct omap2_mcspi *mcspi;
mcspi = container_of(work, struct omap2_mcspi, work);
- spin_lock_irq(&mcspi->lock);
- if (omap2_mcspi_enable_clocks(mcspi))
- goto out;
+ if (omap2_mcspi_enable_clocks(mcspi) < 0)
+ return;
+
+ spin_lock_irq(&mcspi->lock);
/* We only enable one channel at a time -- the one whose message is
* at the head of the queue -- although this controller would gladly
@@ -979,10 +961,9 @@ static void omap2_mcspi_work(struct work_struct *work)
spin_lock_irq(&mcspi->lock);
}
- omap2_mcspi_disable_clocks(mcspi);
-
-out:
spin_unlock_irq(&mcspi->lock);
+
+ omap2_mcspi_disable_clocks(mcspi);
}
static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
@@ -1058,25 +1039,15 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
return 0;
}
-static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
+static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
{
struct spi_master *master = mcspi->master;
u32 tmp;
+ int ret = 0;
- if (omap2_mcspi_enable_clocks(mcspi))
- return -1;
-
- mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
- OMAP2_MCSPI_SYSCONFIG_SOFTRESET);
- do {
- tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS);
- } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
-
- tmp = OMAP2_MCSPI_SYSCONFIG_AUTOIDLE |
- OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP |
- OMAP2_MCSPI_SYSCONFIG_SMARTIDLE;
- mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, tmp);
- omap2_mcspi_ctx[master->bus_num - 1].sysconfig = tmp;
+ ret = omap2_mcspi_enable_clocks(mcspi);
+ if (ret < 0)
+ return ret;
tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp);
@@ -1087,91 +1058,26 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
return 0;
}
-static u8 __initdata spi1_rxdma_id [] = {
- OMAP24XX_DMA_SPI1_RX0,
- OMAP24XX_DMA_SPI1_RX1,
- OMAP24XX_DMA_SPI1_RX2,
- OMAP24XX_DMA_SPI1_RX3,
-};
-
-static u8 __initdata spi1_txdma_id [] = {
- OMAP24XX_DMA_SPI1_TX0,
- OMAP24XX_DMA_SPI1_TX1,
- OMAP24XX_DMA_SPI1_TX2,
- OMAP24XX_DMA_SPI1_TX3,
-};
-
-static u8 __initdata spi2_rxdma_id[] = {
- OMAP24XX_DMA_SPI2_RX0,
- OMAP24XX_DMA_SPI2_RX1,
-};
-
-static u8 __initdata spi2_txdma_id[] = {
- OMAP24XX_DMA_SPI2_TX0,
- OMAP24XX_DMA_SPI2_TX1,
-};
-
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
- || defined(CONFIG_ARCH_OMAP4)
-static u8 __initdata spi3_rxdma_id[] = {
- OMAP24XX_DMA_SPI3_RX0,
- OMAP24XX_DMA_SPI3_RX1,
-};
+static int omap_mcspi_runtime_resume(struct device *dev)
+{
+ struct omap2_mcspi *mcspi;
+ struct spi_master *master;
-static u8 __initdata spi3_txdma_id[] = {
- OMAP24XX_DMA_SPI3_TX0,
- OMAP24XX_DMA_SPI3_TX1,
-};
-#endif
+ master = dev_get_drvdata(dev);
+ mcspi = spi_master_get_devdata(master);
+ omap2_mcspi_restore_ctx(mcspi);
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-static u8 __initdata spi4_rxdma_id[] = {
- OMAP34XX_DMA_SPI4_RX0,
-};
+ return 0;
+}
-static u8 __initdata spi4_txdma_id[] = {
- OMAP34XX_DMA_SPI4_TX0,
-};
-#endif
static int __init omap2_mcspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
+ struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data;
struct omap2_mcspi *mcspi;
struct resource *r;
int status = 0, i;
- const u8 *rxdma_id, *txdma_id;
- unsigned num_chipselect;
-
- switch (pdev->id) {
- case 1:
- rxdma_id = spi1_rxdma_id;
- txdma_id = spi1_txdma_id;
- num_chipselect = 4;
- break;
- case 2:
- rxdma_id = spi2_rxdma_id;
- txdma_id = spi2_txdma_id;
- num_chipselect = 2;
- break;
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
- || defined(CONFIG_ARCH_OMAP4)
- case 3:
- rxdma_id = spi3_rxdma_id;
- txdma_id = spi3_txdma_id;
- num_chipselect = 2;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
- case 4:
- rxdma_id = spi4_rxdma_id;
- txdma_id = spi4_txdma_id;
- num_chipselect = 1;
- break;
-#endif
- default:
- return -EINVAL;
- }
master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
if (master == NULL) {
@@ -1188,7 +1094,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
master->setup = omap2_mcspi_setup;
master->transfer = omap2_mcspi_transfer;
master->cleanup = omap2_mcspi_cleanup;
- master->num_chipselect = num_chipselect;
+ master->num_chipselect = pdata->num_cs;
dev_set_drvdata(&pdev->dev, master);
@@ -1206,49 +1112,62 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
goto err1;
}
+ r->start += pdata->regs_offset;
+ r->end += pdata->regs_offset;
mcspi->phys = r->start;
mcspi->base = ioremap(r->start, r->end - r->start + 1);
if (!mcspi->base) {
dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
status = -ENOMEM;
- goto err1aa;
+ goto err2;
}
+ mcspi->dev = &pdev->dev;
INIT_WORK(&mcspi->work, omap2_mcspi_work);
spin_lock_init(&mcspi->lock);
INIT_LIST_HEAD(&mcspi->msg_queue);
INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs);
- mcspi->ick = clk_get(&pdev->dev, "ick");
- if (IS_ERR(mcspi->ick)) {
- dev_dbg(&pdev->dev, "can't get mcspi_ick\n");
- status = PTR_ERR(mcspi->ick);
- goto err1a;
- }
- mcspi->fck = clk_get(&pdev->dev, "fck");
- if (IS_ERR(mcspi->fck)) {
- dev_dbg(&pdev->dev, "can't get mcspi_fck\n");
- status = PTR_ERR(mcspi->fck);
- goto err2;
- }
-
mcspi->dma_channels = kcalloc(master->num_chipselect,
sizeof(struct omap2_mcspi_dma),
GFP_KERNEL);
if (mcspi->dma_channels == NULL)
- goto err3;
+ goto err2;
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ char dma_ch_name[14];
+ struct resource *dma_res;
+
+ sprintf(dma_ch_name, "rx%d", i);
+ dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ dma_ch_name);
+ if (!dma_res) {
+ dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
+ status = -ENODEV;
+ break;
+ }
- for (i = 0; i < num_chipselect; i++) {
mcspi->dma_channels[i].dma_rx_channel = -1;
- mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i];
+ mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
+ sprintf(dma_ch_name, "tx%d", i);
+ dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ dma_ch_name);
+ if (!dma_res) {
+ dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
+ status = -ENODEV;
+ break;
+ }
+
mcspi->dma_channels[i].dma_tx_channel = -1;
- mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i];
+ mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
}
- if (omap2_mcspi_reset(mcspi) < 0)
- goto err4;
+ pm_runtime_enable(&pdev->dev);
+
+ if (status || omap2_mcspi_master_setup(mcspi) < 0)
+ goto err3;
status = spi_register_master(master);
if (status < 0)
@@ -1257,17 +1176,13 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
return status;
err4:
- kfree(mcspi->dma_channels);
+ spi_master_put(master);
err3:
- clk_put(mcspi->fck);
+ kfree(mcspi->dma_channels);
err2:
- clk_put(mcspi->ick);
-err1a:
- iounmap(mcspi->base);
-err1aa:
release_mem_region(r->start, (r->end - r->start) + 1);
+ iounmap(mcspi->base);
err1:
- spi_master_put(master);
return status;
}
@@ -1283,9 +1198,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
mcspi = spi_master_get_devdata(master);
dma_channels = mcspi->dma_channels;
- clk_put(mcspi->fck);
- clk_put(mcspi->ick);
-
+ omap2_mcspi_disable_clocks(mcspi);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(r->start, (r->end - r->start) + 1);
@@ -1336,6 +1249,7 @@ static int omap2_mcspi_resume(struct device *dev)
static const struct dev_pm_ops omap2_mcspi_pm_ops = {
.resume = omap2_mcspi_resume,
+ .runtime_resume = omap_mcspi_runtime_resume,
};
static struct platform_driver omap2_mcspi_driver = {
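The omap2_mcspi conversion above drops the private ick/fck clk_get()/clk_enable() handling in favour of runtime PM: enable_clocks() becomes pm_runtime_get_sync(), disable_clocks() becomes pm_runtime_put_sync(), and the context restore moves into a runtime_resume callback. Reduced to a sketch with placeholder names (and with error handling trimmed), the pattern is:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* called by the PM core once the device is powered and clocked again */
static int example_runtime_resume(struct device *dev)
{
	/* restore any register context lost while suspended */
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	.runtime_resume = example_runtime_resume,
};

static int example_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);		/* allow runtime PM for this device */

	ret = pm_runtime_get_sync(&pdev->dev);	/* power up, may call runtime_resume */
	if (ret < 0)
		return ret;
	/* ... access the hardware ... */
	pm_runtime_put_sync(&pdev->dev);	/* drop the usage count again */
	return 0;
}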
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 95928833855..a429b01d028 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1557,9 +1557,7 @@ static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->ssp = ssp;
master->dev.parent = &pdev->dev;
-#ifdef CONFIG_OF
master->dev.of_node = pdev->dev.of_node;
-#endif
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c
index 19752b09e15..378e504f89e 100644
--- a/drivers/spi/pxa2xx_spi_pci.c
+++ b/drivers/spi/pxa2xx_spi_pci.c
@@ -89,9 +89,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
goto err_nomem;
pdev->dev.parent = &dev->dev;
-#ifdef CONFIG_OF
pdev->dev.of_node = dev->dev.of_node;
-#endif
ssp = &spi_info->ssp;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = ioremap(phys_beg, phys_len);
diff --git a/drivers/spi/ti-ssp-spi.c b/drivers/spi/ti-ssp-spi.c
new file mode 100644
index 00000000000..ee22795c797
--- /dev/null
+++ b/drivers/spi/ti-ssp-spi.c
@@ -0,0 +1,402 @@
+/*
+ * Sequencer Serial Port (SSP) based SPI master driver
+ *
+ * Copyright (C) 2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/ti_ssp.h>
+
+#define MODE_BITS (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH)
+
+struct ti_ssp_spi {
+ struct spi_master *master;
+ struct device *dev;
+ spinlock_t lock;
+ struct list_head msg_queue;
+ struct completion complete;
+ bool shutdown;
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+ u8 mode, bpw;
+ int cs_active;
+ u32 pc_en, pc_dis, pc_wr, pc_rd;
+ void (*select)(int cs);
+};
+
+static u32 ti_ssp_spi_rx(struct ti_ssp_spi *hw)
+{
+ u32 ret;
+
+ ti_ssp_run(hw->dev, hw->pc_rd, 0, &ret);
+ return ret;
+}
+
+static void ti_ssp_spi_tx(struct ti_ssp_spi *hw, u32 data)
+{
+ ti_ssp_run(hw->dev, hw->pc_wr, data << (32 - hw->bpw), NULL);
+}
+
+static int ti_ssp_spi_txrx(struct ti_ssp_spi *hw, struct spi_message *msg,
+ struct spi_transfer *t)
+{
+ int count;
+
+ if (hw->bpw <= 8) {
+ u8 *rx = t->rx_buf;
+ const u8 *tx = t->tx_buf;
+
+ for (count = 0; count < t->len; count += 1) {
+ if (t->tx_buf)
+ ti_ssp_spi_tx(hw, *tx++);
+ if (t->rx_buf)
+ *rx++ = ti_ssp_spi_rx(hw);
+ }
+ } else if (hw->bpw <= 16) {
+ u16 *rx = t->rx_buf;
+ const u16 *tx = t->tx_buf;
+
+ for (count = 0; count < t->len; count += 2) {
+ if (t->tx_buf)
+ ti_ssp_spi_tx(hw, *tx++);
+ if (t->rx_buf)
+ *rx++ = ti_ssp_spi_rx(hw);
+ }
+ } else {
+ u32 *rx = t->rx_buf;
+ const u32 *tx = t->tx_buf;
+
+ for (count = 0; count < t->len; count += 4) {
+ if (t->tx_buf)
+ ti_ssp_spi_tx(hw, *tx++);
+ if (t->rx_buf)
+ *rx++ = ti_ssp_spi_rx(hw);
+ }
+ }
+
+ msg->actual_length += count; /* bytes transferred */
+
+ dev_dbg(&msg->spi->dev, "xfer %s%s, %d bytes, %d bpw, count %d%s\n",
+ t->tx_buf ? "tx" : "", t->rx_buf ? "rx" : "", t->len,
+ hw->bpw, count, (count < t->len) ? " (under)" : "");
+
+ return (count < t->len) ? -EIO : 0; /* left over data */
+}
+
+static void ti_ssp_spi_chip_select(struct ti_ssp_spi *hw, int cs_active)
+{
+ cs_active = !!cs_active;
+ if (cs_active == hw->cs_active)
+ return;
+ ti_ssp_run(hw->dev, cs_active ? hw->pc_en : hw->pc_dis, 0, NULL);
+ hw->cs_active = cs_active;
+}
+
+#define __SHIFT_OUT(bits) (SSP_OPCODE_SHIFT | SSP_OUT_MODE | \
+ cs_en | clk | SSP_COUNT((bits) * 2 - 1))
+#define __SHIFT_IN(bits) (SSP_OPCODE_SHIFT | SSP_IN_MODE | \
+ cs_en | clk | SSP_COUNT((bits) * 2 - 1))
+
+static int ti_ssp_spi_setup_transfer(struct ti_ssp_spi *hw, u8 bpw, u8 mode)
+{
+ int error, idx = 0;
+ u32 seqram[16];
+ u32 cs_en, cs_dis, clk;
+ u32 topbits, botbits;
+
+ mode &= MODE_BITS;
+ if (mode == hw->mode && bpw == hw->bpw)
+ return 0;
+
+ cs_en = (mode & SPI_CS_HIGH) ? SSP_CS_HIGH : SSP_CS_LOW;
+ cs_dis = (mode & SPI_CS_HIGH) ? SSP_CS_LOW : SSP_CS_HIGH;
+ clk = (mode & SPI_CPOL) ? SSP_CLK_HIGH : SSP_CLK_LOW;
+
+ /* Construct instructions */
+
+ /* Disable Chip Select */
+ hw->pc_dis = idx;
+ seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_dis | clk;
+ seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_dis | clk;
+
+ /* Enable Chip Select */
+ hw->pc_en = idx;
+ seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_en | clk;
+ seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
+
+ /* Reads and writes need to be split for bpw > 16 */
+ topbits = (bpw > 16) ? 16 : bpw;
+ botbits = bpw - topbits;
+
+ /* Write */
+ hw->pc_wr = idx;
+ seqram[idx++] = __SHIFT_OUT(topbits) | SSP_ADDR_REG;
+ if (botbits)
+ seqram[idx++] = __SHIFT_OUT(botbits) | SSP_DATA_REG;
+ seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
+
+ /* Read */
+ hw->pc_rd = idx;
+ if (botbits)
+ seqram[idx++] = __SHIFT_IN(botbits) | SSP_ADDR_REG;
+ seqram[idx++] = __SHIFT_IN(topbits) | SSP_DATA_REG;
+ seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
+
+ error = ti_ssp_load(hw->dev, 0, seqram, idx);
+ if (error < 0)
+ return error;
+
+ error = ti_ssp_set_mode(hw->dev, ((mode & SPI_CPHA) ?
+ 0 : SSP_EARLY_DIN));
+ if (error < 0)
+ return error;
+
+ hw->bpw = bpw;
+ hw->mode = mode;
+
+ return error;
+}
+
+static void ti_ssp_spi_work(struct work_struct *work)
+{
+ struct ti_ssp_spi *hw = container_of(work, struct ti_ssp_spi, work);
+
+ spin_lock(&hw->lock);
+
+ while (!list_empty(&hw->msg_queue)) {
+ struct spi_message *m;
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ int status = 0;
+
+ m = container_of(hw->msg_queue.next, struct spi_message,
+ queue);
+
+ list_del_init(&m->queue);
+
+ spin_unlock(&hw->lock);
+
+ spi = m->spi;
+
+ if (hw->select)
+ hw->select(spi->chip_select);
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ int bpw = spi->bits_per_word;
+ int xfer_status;
+
+ if (t->bits_per_word)
+ bpw = t->bits_per_word;
+
+ if (ti_ssp_spi_setup_transfer(hw, bpw, spi->mode) < 0)
+ break;
+
+ ti_ssp_spi_chip_select(hw, 1);
+
+ xfer_status = ti_ssp_spi_txrx(hw, m, t);
+ if (xfer_status < 0)
+ status = xfer_status;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (t->cs_change)
+ ti_ssp_spi_chip_select(hw, 0);
+ }
+
+ ti_ssp_spi_chip_select(hw, 0);
+ m->status = status;
+ m->complete(m->context);
+
+ spin_lock(&hw->lock);
+ }
+
+ if (hw->shutdown)
+ complete(&hw->complete);
+
+ spin_unlock(&hw->lock);
+}
+
+static int ti_ssp_spi_setup(struct spi_device *spi)
+{
+ if (spi->bits_per_word > 32)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ti_ssp_spi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct ti_ssp_spi *hw;
+ struct spi_transfer *t;
+ int error = 0;
+
+ m->actual_length = 0;
+ m->status = -EINPROGRESS;
+
+ hw = spi_master_get_devdata(spi->master);
+
+ if (list_empty(&m->transfers) || !m->complete)
+ return -EINVAL;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->len && !(t->rx_buf || t->tx_buf)) {
+ dev_err(&spi->dev, "invalid xfer, no buffer\n");
+ return -EINVAL;
+ }
+
+ if (t->len && t->rx_buf && t->tx_buf) {
+ dev_err(&spi->dev, "invalid xfer, full duplex\n");
+ return -EINVAL;
+ }
+
+ if (t->bits_per_word > 32) {
+ dev_err(&spi->dev, "invalid xfer width %d\n",
+ t->bits_per_word);
+ return -EINVAL;
+ }
+ }
+
+ spin_lock(&hw->lock);
+ if (hw->shutdown) {
+ error = -ESHUTDOWN;
+ goto error_unlock;
+ }
+ list_add_tail(&m->queue, &hw->msg_queue);
+ queue_work(hw->workqueue, &hw->work);
+error_unlock:
+ spin_unlock(&hw->lock);
+ return error;
+}
+
+static int __devinit ti_ssp_spi_probe(struct platform_device *pdev)
+{
+ const struct ti_ssp_spi_data *pdata;
+ struct ti_ssp_spi *hw;
+ struct spi_master *master;
+ struct device *dev = &pdev->dev;
+ int error = 0;
+
+ pdata = dev->platform_data;
+ if (!pdata) {
+ dev_err(dev, "platform data not found\n");
+ return -EINVAL;
+ }
+
+ master = spi_alloc_master(dev, sizeof(struct ti_ssp_spi));
+ if (!master) {
+ dev_err(dev, "cannot allocate SPI master\n");
+ return -ENOMEM;
+ }
+
+ hw = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, hw);
+
+ hw->master = master;
+ hw->dev = dev;
+ hw->select = pdata->select;
+
+ spin_lock_init(&hw->lock);
+ init_completion(&hw->complete);
+ INIT_LIST_HEAD(&hw->msg_queue);
+ INIT_WORK(&hw->work, ti_ssp_spi_work);
+
+ hw->workqueue = create_singlethread_workqueue(dev_name(dev));
+ if (!hw->workqueue) {
+ error = -ENOMEM;
+ dev_err(dev, "work queue creation failed\n");
+ goto error_wq;
+ }
+
+ error = ti_ssp_set_iosel(hw->dev, pdata->iosel);
+ if (error < 0) {
+ dev_err(dev, "io setup failed\n");
+ goto error_iosel;
+ }
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = pdata->num_cs;
+ master->mode_bits = MODE_BITS;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->setup = ti_ssp_spi_setup;
+ master->transfer = ti_ssp_spi_transfer;
+
+ error = spi_register_master(master);
+ if (error) {
+ dev_err(dev, "master registration failed\n");
+ goto error_reg;
+ }
+
+ return 0;
+
+error_reg:
+error_iosel:
+ destroy_workqueue(hw->workqueue);
+error_wq:
+ spi_master_put(master);
+ return error;
+}
+
+static int __devexit ti_ssp_spi_remove(struct platform_device *pdev)
+{
+ struct ti_ssp_spi *hw = platform_get_drvdata(pdev);
+ int error;
+
+ hw->shutdown = 1;
+ while (!list_empty(&hw->msg_queue)) {
+ error = wait_for_completion_interruptible(&hw->complete);
+ if (error < 0) {
+ hw->shutdown = 0;
+ return error;
+ }
+ }
+ destroy_workqueue(hw->workqueue);
+ spi_unregister_master(hw->master);
+
+ return 0;
+}
+
+static struct platform_driver ti_ssp_spi_driver = {
+ .probe = ti_ssp_spi_probe,
+ .remove = __devexit_p(ti_ssp_spi_remove),
+ .driver = {
+ .name = "ti-ssp-spi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ti_ssp_spi_init(void)
+{
+ return platform_driver_register(&ti_ssp_spi_driver);
+}
+module_init(ti_ssp_spi_init);
+
+static void __exit ti_ssp_spi_exit(void)
+{
+ platform_driver_unregister(&ti_ssp_spi_driver);
+}
+module_exit(ti_ssp_spi_exit);
+
+MODULE_DESCRIPTION("SSP SPI Master");
+MODULE_AUTHOR("Cyril Chemparathy");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ti-ssp-spi");
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 7adaef62a99..c69c6f2c2c5 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/xilinx_spi.h>
@@ -351,14 +352,12 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_OF
static const struct of_device_id xilinx_spi_of_match[] = {
{ .compatible = "xlnx,xps-spi-2.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.b", },
{}
};
MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
-#endif
struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
u32 irq, s16 bus_num, int num_cs, int little_endian, int bits_per_word)
@@ -394,9 +393,7 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
master->bus_num = bus_num;
master->num_chipselect = num_cs;
-#ifdef CONFIG_OF
master->dev.of_node = dev->of_node;
-#endif
xspi->mem = *mem;
xspi->irq = irq;
@@ -474,7 +471,7 @@ static int __devinit xilinx_spi_probe(struct platform_device *dev)
struct spi_master *master;
u8 i;
- pdata = dev->dev.platform_data;
+ pdata = mfd_get_data(dev);
if (pdata) {
num_cs = pdata->num_chipselect;
little_endian = pdata->little_endian;
@@ -539,9 +536,7 @@ static struct platform_driver xilinx_spi_driver = {
.driver = {
.name = XILINX_SPI_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_OF
.of_match_table = xilinx_spi_of_match,
-#endif
},
};
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 7284d0c18a4..d502f9835ea 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -255,8 +255,8 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc)
desc->wHubCharacteristics = (__force __u16)
(__constant_cpu_to_le16(0x0001));
desc->bNbrPorts = VHCI_NPORTS;
- desc->bitmap[0] = 0xff;
- desc->bitmap[1] = 0xff;
+ desc->u.hs.DeviceRemovable[0] = 0xff;
+ desc->u.hs.DeviceRemovable[1] = 0xff;
}
static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 2b8334601c8..aa9d3d11031 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -537,8 +537,8 @@ config SERIAL_S3C6400
config SERIAL_S5PV210
tristate "Samsung S5PV210 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_S5P6442 || CPU_S5PV310)
- select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_S5PV310)
+ depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_S5P6442 || CPU_EXYNOS4210)
+ select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_EXYNOS4210)
default y
help
Serial port support for Samsung's S5P family of SoCs
@@ -1596,4 +1596,19 @@ config SERIAL_PCH_UART
This driver is for the PCH (Platform Controller Hub) UART of the Intel EG20T,
which is an IOH (Input/Output Hub) for x86 embedded processors.
With PCH_DMA enabled, this PCH UART can operate in DMA mode.
+
+config SERIAL_MXS_AUART
+ depends on ARCH_MXS
+ tristate "MXS AUART support"
+ select SERIAL_CORE
+ help
+ This driver supports the MXS Application UART (AUART) port.
+
+config SERIAL_MXS_AUART_CONSOLE
+ bool "MXS AUART console support"
+ depends on SERIAL_MXS_AUART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Enable an MXS AUART port to be the system console.
+
endmenu
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 8ea92e9c73b..c8550719de5 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -92,3 +92,4 @@ obj-$(CONFIG_SERIAL_MRST_MAX3110) += mrst_max3110.o
obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o
obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o
obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
+obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 2904aa04412..d742dd2c525 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -676,7 +676,7 @@ static struct uart_driver amba_reg = {
.cons = AMBA_CONSOLE,
};
-static int pl010_probe(struct amba_device *dev, struct amba_id *id)
+static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
void __iomem *base;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index e76d7d00012..57731e87008 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -96,6 +96,22 @@ static struct vendor_data vendor_st = {
};
/* Deals with DMA transactions */
+
+struct pl011_sgbuf {
+ struct scatterlist sg;
+ char *buf;
+};
+
+struct pl011_dmarx_data {
+ struct dma_chan *chan;
+ struct completion complete;
+ bool use_buf_b;
+ struct pl011_sgbuf sgbuf_a;
+ struct pl011_sgbuf sgbuf_b;
+ dma_cookie_t cookie;
+ bool running;
+};
+
struct pl011_dmatx_data {
struct dma_chan *chan;
struct scatterlist sg;
@@ -120,12 +136,70 @@ struct uart_amba_port {
char type[12];
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
- bool using_dma;
+ bool using_tx_dma;
+ bool using_rx_dma;
+ struct pl011_dmarx_data dmarx;
struct pl011_dmatx_data dmatx;
#endif
};
/*
+ * Reads up to 256 characters from the FIFO or until it's empty and
+ * inserts them into the TTY layer. Returns the number of characters
+ * read from the FIFO.
+ */
+static int pl011_fifo_to_tty(struct uart_amba_port *uap)
+{
+ u16 status, ch;
+ unsigned int flag, max_count = 256;
+ int fifotaken = 0;
+
+ while (max_count--) {
+ status = readw(uap->port.membase + UART01x_FR);
+ if (status & UART01x_FR_RXFE)
+ break;
+
+ /* Take chars from the FIFO and update status */
+ ch = readw(uap->port.membase + UART01x_DR) |
+ UART_DUMMY_DR_RX;
+ flag = TTY_NORMAL;
+ uap->port.icount.rx++;
+ fifotaken++;
+
+ if (unlikely(ch & UART_DR_ERROR)) {
+ if (ch & UART011_DR_BE) {
+ ch &= ~(UART011_DR_FE | UART011_DR_PE);
+ uap->port.icount.brk++;
+ if (uart_handle_break(&uap->port))
+ continue;
+ } else if (ch & UART011_DR_PE)
+ uap->port.icount.parity++;
+ else if (ch & UART011_DR_FE)
+ uap->port.icount.frame++;
+ if (ch & UART011_DR_OE)
+ uap->port.icount.overrun++;
+
+ ch &= uap->port.read_status_mask;
+
+ if (ch & UART011_DR_BE)
+ flag = TTY_BREAK;
+ else if (ch & UART011_DR_PE)
+ flag = TTY_PARITY;
+ else if (ch & UART011_DR_FE)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(&uap->port, ch & 255))
+ continue;
+
+ uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+ }
+
+ return fifotaken;
+}
+
+
+/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface,
* no custom DMA interfaces are supported.
@@ -134,6 +208,31 @@ struct uart_amba_port {
#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
+static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
+ enum dma_data_direction dir)
+{
+ sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+ if (!sg->buf)
+ return -ENOMEM;
+
+ sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
+
+ if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
+ kfree(sg->buf);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
+ enum dma_data_direction dir)
+{
+ if (sg->buf) {
+ dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
+ kfree(sg->buf);
+ }
+}
+
static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
{
/* DMA is the sole user of the platform data right now */
@@ -153,7 +252,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
return;
}
- /* Try to acquire a generic DMA engine slave channel */
+ /* Try to acquire a generic DMA engine slave TX channel */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -168,6 +267,28 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
dev_info(uap->port.dev, "DMA channel TX %s\n",
dma_chan_name(uap->dmatx.chan));
+
+ /* Optionally make use of an RX channel as well */
+ if (plat->dma_rx_param) {
+ struct dma_slave_config rx_conf = {
+ .src_addr = uap->port.mapbase + UART01x_DR,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .direction = DMA_FROM_DEVICE,
+ .src_maxburst = uap->fifosize >> 1,
+ };
+
+ chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
+ if (!chan) {
+ dev_err(uap->port.dev, "no RX DMA channel!\n");
+ return;
+ }
+
+ dmaengine_slave_config(chan, &rx_conf);
+ uap->dmarx.chan = chan;
+
+ dev_info(uap->port.dev, "DMA channel RX %s\n",
+ dma_chan_name(uap->dmarx.chan));
+ }
}
#ifndef MODULE
@@ -219,9 +340,10 @@ static void pl011_dma_remove(struct uart_amba_port *uap)
/* TODO: remove the initcall if it has not yet executed */
if (uap->dmatx.chan)
dma_release_channel(uap->dmatx.chan);
+ if (uap->dmarx.chan)
+ dma_release_channel(uap->dmarx.chan);
}
-
/* Forward declare this for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
@@ -380,7 +502,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
*/
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
- if (!uap->using_dma)
+ if (!uap->using_tx_dma)
return false;
/*
@@ -432,7 +554,7 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
u16 dmacr;
- if (!uap->using_dma)
+ if (!uap->using_tx_dma)
return false;
if (!uap->port.x_char) {
@@ -492,7 +614,7 @@ static void pl011_dma_flush_buffer(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
- if (!uap->using_dma)
+ if (!uap->using_tx_dma)
return;
/* Avoid deadlock with the DMA engine callback */
@@ -508,9 +630,219 @@ static void pl011_dma_flush_buffer(struct uart_port *port)
}
}
+static void pl011_dma_rx_callback(void *data);
+
+static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+{
+ struct dma_chan *rxchan = uap->dmarx.chan;
+ struct dma_device *dma_dev;
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_async_tx_descriptor *desc;
+ struct pl011_sgbuf *sgbuf;
+
+ if (!rxchan)
+ return -EIO;
+
+ /* Start the RX DMA job */
+ sgbuf = uap->dmarx.use_buf_b ?
+ &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ dma_dev = rxchan->device;
+ desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ /*
+ * If the DMA engine is busy and cannot prepare a
+ * channel, no big deal, the driver will fall back
+ * to interrupt mode as a result of this error code.
+ */
+ if (!desc) {
+ uap->dmarx.running = false;
+ dmaengine_terminate_all(rxchan);
+ return -EBUSY;
+ }
+
+ /* Some data to go along to the callback */
+ desc->callback = pl011_dma_rx_callback;
+ desc->callback_param = uap;
+ dmarx->cookie = dmaengine_submit(desc);
+ dma_async_issue_pending(rxchan);
+
+ uap->dmacr |= UART011_RXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+ uap->dmarx.running = true;
+
+ uap->im &= ~UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+
+ return 0;
+}
+
+/*
+ * This is called when either the DMA job is complete, or
+ * the FIFO timeout interrupt occurred. This must be called
+ * with the port spinlock uap->port.lock held.
+ */
+static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+ u32 pending, bool use_buf_b,
+ bool readfifo)
+{
+ struct tty_struct *tty = uap->port.state->port.tty;
+ struct pl011_sgbuf *sgbuf = use_buf_b ?
+ &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ struct device *dev = uap->dmarx.chan->device->dev;
+ int dma_count = 0;
+ u32 fifotaken = 0; /* only used for vdbg() */
+
+ /* Pick everything from the DMA first */
+ if (pending) {
+ /* Sync in buffer */
+ dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+
+ /*
+ * First take all chars in the DMA pipe, then look in the FIFO.
+ * Note that tty_insert_flip_buf() tries to take as many chars
+ * as it can.
+ */
+ dma_count = tty_insert_flip_string(uap->port.state->port.tty,
+ sgbuf->buf, pending);
+
+ /* Return buffer to device */
+ dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+
+ uap->port.icount.rx += dma_count;
+ if (dma_count < pending)
+ dev_warn(uap->port.dev,
+ "couldn't insert all characters (TTY is full?)\n");
+ }
+
+ /*
+ * Only continue with trying to read the FIFO if all DMA chars have
+ * been taken first.
+ */
+ if (dma_count == pending && readfifo) {
+ /* Clear any error flags */
+ writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
+ uap->port.membase + UART011_ICR);
+
+ /*
+ * If we read all the DMA'd characters, and we had an
+ * incomplete buffer, that could be due to an rx error, or
+ * maybe we just timed out. Read any pending chars and check
+ * the error status.
+ *
+ * Error conditions will only occur in the FIFO, these will
+ * trigger an immediate interrupt and stop the DMA job, so we
+ * will always find the error in the FIFO, never in the DMA
+ * buffer.
+ */
+ fifotaken = pl011_fifo_to_tty(uap);
+ }
+
+ spin_unlock(&uap->port.lock);
+ dev_vdbg(uap->port.dev,
+ "Took %d chars from DMA buffer and %d chars from the FIFO\n",
+ dma_count, fifotaken);
+ tty_flip_buffer_push(tty);
+ spin_lock(&uap->port.lock);
+}
+
+static void pl011_dma_rx_irq(struct uart_amba_port *uap)
+{
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_chan *rxchan = dmarx->chan;
+ struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+ &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+ size_t pending;
+ struct dma_tx_state state;
+ enum dma_status dmastat;
+
+ /*
+ * Pause the transfer so we can trust the current counter,
+ * do this before we pause the PL011 block, else we may
+ * overflow the FIFO.
+ */
+ if (dmaengine_pause(rxchan))
+ dev_err(uap->port.dev, "unable to pause DMA transfer\n");
+ dmastat = rxchan->device->device_tx_status(rxchan,
+ dmarx->cookie, &state);
+ if (dmastat != DMA_PAUSED)
+ dev_err(uap->port.dev, "unable to pause DMA transfer\n");
+
+ /* Disable RX DMA - incoming data will wait in the FIFO */
+ uap->dmacr &= ~UART011_RXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+ uap->dmarx.running = false;
+
+ pending = sgbuf->sg.length - state.residue;
+ BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+ /* Then we terminate the transfer - we now know our residue */
+ dmaengine_terminate_all(rxchan);
+
+ /*
+ * This will take the chars we have so far and insert
+ * into the framework.
+ */
+ pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
+
+ /* Switch buffer & re-trigger DMA job */
+ dmarx->use_buf_b = !dmarx->use_buf_b;
+ if (pl011_dma_rx_trigger_dma(uap)) {
+ dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
+ "fall back to interrupt mode\n");
+ uap->im |= UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ }
+}
+
+static void pl011_dma_rx_callback(void *data)
+{
+ struct uart_amba_port *uap = data;
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ bool lastbuf = dmarx->use_buf_b;
+ int ret;
+
+ /*
+ * This completion interrupt occurs typically when the
+ * RX buffer is totally stuffed but no timeout has yet
+ * occurred. When that happens, we just want the RX
+ * routine to flush out the secondary DMA buffer while
+ * we immediately trigger the next DMA job.
+ */
+ spin_lock_irq(&uap->port.lock);
+ uap->dmarx.running = false;
+ dmarx->use_buf_b = !lastbuf;
+ ret = pl011_dma_rx_trigger_dma(uap);
+
+ pl011_dma_rx_chars(uap, PL011_DMA_BUFFER_SIZE, lastbuf, false);
+ spin_unlock_irq(&uap->port.lock);
+ /*
+ * Do this check after we picked the DMA chars so we don't
+ * get some IRQ immediately from RX.
+ */
+ if (ret) {
+ dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
+ "fall back to interrupt mode\n");
+ uap->im |= UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ }
+}
+
+/*
+ * Stop accepting received characters, when we're shutting down or
+ * suspending this port.
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+{
+ /* FIXME. Just disable the DMA enable */
+ uap->dmacr &= ~UART011_RXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+}
static void pl011_dma_startup(struct uart_amba_port *uap)
{
+ int ret;
+
if (!uap->dmatx.chan)
return;
@@ -525,8 +857,33 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
/* The DMA buffer is now the FIFO the TTY subsystem can use */
uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
- uap->using_dma = true;
+ uap->using_tx_dma = true;
+
+ if (!uap->dmarx.chan)
+ goto skip_rx;
+
+ /* Allocate and map DMA RX buffers */
+ ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+ "RX buffer A", ret);
+ goto skip_rx;
+ }
+
+ ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+ "RX buffer B", ret);
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ DMA_FROM_DEVICE);
+ goto skip_rx;
+ }
+ uap->using_rx_dma = true;
+
+skip_rx:
/* Turn on DMA error (RX/TX will be enabled on demand) */
uap->dmacr |= UART011_DMAONERR;
writew(uap->dmacr, uap->port.membase + UART011_DMACR);
@@ -539,11 +896,17 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
if (uap->vendor->dma_threshold)
writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
uap->port.membase + ST_UART011_DMAWM);
+
+ if (uap->using_rx_dma) {
+ if (pl011_dma_rx_trigger_dma(uap))
+ dev_dbg(uap->port.dev, "could not trigger initial "
+ "RX DMA job, fall back to interrupt mode\n");
+ }
}
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
- if (!uap->using_dma)
+ if (!(uap->using_tx_dma || uap->using_rx_dma))
return;
/* Disable RX and TX DMA */
@@ -555,19 +918,39 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
writew(uap->dmacr, uap->port.membase + UART011_DMACR);
spin_unlock_irq(&uap->port.lock);
- /* In theory, this should already be done by pl011_dma_flush_buffer */
- dmaengine_terminate_all(uap->dmatx.chan);
- if (uap->dmatx.queued) {
- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
- DMA_TO_DEVICE);
- uap->dmatx.queued = false;
+ if (uap->using_tx_dma) {
+ /* In theory, this should already be done by pl011_dma_flush_buffer */
+ dmaengine_terminate_all(uap->dmatx.chan);
+ if (uap->dmatx.queued) {
+ dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+ DMA_TO_DEVICE);
+ uap->dmatx.queued = false;
+ }
+
+ kfree(uap->dmatx.buf);
+ uap->using_tx_dma = false;
}
- kfree(uap->dmatx.buf);
+ if (uap->using_rx_dma) {
+ dmaengine_terminate_all(uap->dmarx.chan);
+ /* Clean up the RX DMA */
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+ uap->using_rx_dma = false;
+ }
+}
- uap->using_dma = false;
+static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
+{
+ return uap->using_rx_dma;
}
+static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
+{
+ return uap->using_rx_dma && uap->dmarx.running;
+}
+
+
#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
@@ -600,6 +983,29 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
return false;
}
+static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+{
+}
+
+static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+{
+ return -EIO;
+}
+
+static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
+{
+ return false;
+}
+
+static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
+{
+ return false;
+}
+
#define pl011_dma_flush_buffer NULL
#endif
@@ -630,6 +1036,8 @@ static void pl011_stop_rx(struct uart_port *port)
uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
UART011_PEIM|UART011_BEIM|UART011_OEIM);
writew(uap->im, uap->port.membase + UART011_IMSC);
+
+ pl011_dma_rx_stop(uap);
}
static void pl011_enable_ms(struct uart_port *port)
@@ -643,51 +1051,24 @@ static void pl011_enable_ms(struct uart_port *port)
static void pl011_rx_chars(struct uart_amba_port *uap)
{
struct tty_struct *tty = uap->port.state->port.tty;
- unsigned int status, ch, flag, max_count = 256;
-
- status = readw(uap->port.membase + UART01x_FR);
- while ((status & UART01x_FR_RXFE) == 0 && max_count--) {
- ch = readw(uap->port.membase + UART01x_DR) | UART_DUMMY_DR_RX;
- flag = TTY_NORMAL;
- uap->port.icount.rx++;
-
- /*
- * Note that the error handling code is
- * out of the main execution path
- */
- if (unlikely(ch & UART_DR_ERROR)) {
- if (ch & UART011_DR_BE) {
- ch &= ~(UART011_DR_FE | UART011_DR_PE);
- uap->port.icount.brk++;
- if (uart_handle_break(&uap->port))
- goto ignore_char;
- } else if (ch & UART011_DR_PE)
- uap->port.icount.parity++;
- else if (ch & UART011_DR_FE)
- uap->port.icount.frame++;
- if (ch & UART011_DR_OE)
- uap->port.icount.overrun++;
-
- ch &= uap->port.read_status_mask;
-
- if (ch & UART011_DR_BE)
- flag = TTY_BREAK;
- else if (ch & UART011_DR_PE)
- flag = TTY_PARITY;
- else if (ch & UART011_DR_FE)
- flag = TTY_FRAME;
- }
- if (uart_handle_sysrq_char(&uap->port, ch & 255))
- goto ignore_char;
-
- uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+ pl011_fifo_to_tty(uap);
- ignore_char:
- status = readw(uap->port.membase + UART01x_FR);
- }
spin_unlock(&uap->port.lock);
tty_flip_buffer_push(tty);
+ /*
+ * If we were temporarily out of DMA mode for a while,
+ * attempt to switch back to DMA mode again.
+ */
+ if (pl011_dma_rx_available(uap)) {
+ if (pl011_dma_rx_trigger_dma(uap)) {
+ dev_dbg(uap->port.dev, "could not trigger RX DMA job "
+ "fall back to interrupt mode again\n");
+ uap->im |= UART011_RXIM;
+ } else
+ uap->im &= ~UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ }
spin_lock(&uap->port.lock);
}
@@ -767,8 +1148,12 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
UART011_RXIS),
uap->port.membase + UART011_ICR);
- if (status & (UART011_RTIS|UART011_RXIS))
- pl011_rx_chars(uap);
+ if (status & (UART011_RTIS|UART011_RXIS)) {
+ if (pl011_dma_rx_running(uap))
+ pl011_dma_rx_irq(uap);
+ else
+ pl011_rx_chars(uap);
+ }
if (status & (UART011_DSRMIS|UART011_DCDMIS|
UART011_CTSMIS|UART011_RIMIS))
pl011_modem_status(uap);
@@ -945,10 +1330,14 @@ static int pl011_startup(struct uart_port *port)
pl011_dma_startup(uap);
/*
- * Finally, enable interrupts
+ * Finally, enable interrupts. When using DMA, enable only the
+ * receive timeout interrupt; if the initial RX DMA job failed,
+ * start in interrupt mode as well.
*/
spin_lock_irq(&uap->port.lock);
- uap->im = UART011_RXIM | UART011_RTIM;
+ uap->im = UART011_RTIM;
+ if (!pl011_dma_rx_running(uap))
+ uap->im |= UART011_RXIM;
writew(uap->im, uap->port.membase + UART011_IMSC);
spin_unlock_irq(&uap->port.lock);
@@ -1349,7 +1738,7 @@ static struct uart_driver amba_reg = {
.cons = AMBA_CONSOLE,
};
-static int pl011_probe(struct amba_device *dev, struct amba_id *id)
+static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
struct vendor_data *vendor = id->data;
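The PL011 RX path added above uses two DMA buffers in a ping-pong arrangement: when one buffer completes (or an RX timeout flushes it), its contents are pushed to the tty layer, the other buffer is armed as the next DMA target, and a failed re-arm drops the port back to interrupt-driven RX. The following standalone sketch models only that buffer-switching logic; the helpers arm_dma(), push_to_tty() and enable_rx_interrupt() are placeholders for the real DMA/tty plumbing, not kernel APIs.

/* Build stand-alone: cc -std=c99 rx_pingpong.c */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DMA_BUFFER_SIZE 4096

struct rx_state {
	char buf_a[DMA_BUFFER_SIZE];
	char buf_b[DMA_BUFFER_SIZE];
	bool use_buf_b;		/* which buffer the next DMA job fills */
	bool dma_running;
};

/* Hypothetical hooks standing in for the real DMA engine and tty layer. */
static int arm_dma(struct rx_state *rx, char *buf) { (void)rx; (void)buf; return 0; }
static void push_to_tty(char *buf, size_t len) { (void)buf; (void)len; }
static void enable_rx_interrupt(void) { puts("falling back to interrupt mode"); }

static void rx_complete(struct rx_state *rx, size_t received)
{
	char *done = rx->use_buf_b ? rx->buf_b : rx->buf_a;

	rx->dma_running = false;
	rx->use_buf_b = !rx->use_buf_b;		/* switch buffers */

	/* Re-arm DMA on the other buffer before draining the full one. */
	char *next = rx->use_buf_b ? rx->buf_b : rx->buf_a;
	if (arm_dma(rx, next) == 0)
		rx->dma_running = true;
	else
		enable_rx_interrupt();

	push_to_tty(done, received);
}

int main(void)
{
	struct rx_state rx = { .use_buf_b = false };

	rx_complete(&rx, DMA_BUFFER_SIZE);	/* buffer A completely full */
	rx_complete(&rx, 100);			/* buffer B flushed by a timeout */
	return 0;
}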
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 8e43a7b69e6..bfee9b4c666 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2007 Google, Inc.
* Author: Robert Love <rlove@google.com>
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -31,6 +32,7 @@
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/delay.h>
#include "msm_serial.h"
@@ -38,9 +40,20 @@ struct msm_port {
struct uart_port uart;
char name[16];
struct clk *clk;
+ struct clk *pclk;
unsigned int imr;
+ unsigned int *gsbi_base;
+ int is_uartdm;
+ unsigned int old_snap_state;
};
+static inline void wait_for_xmitr(struct uart_port *port, int bits)
+{
+ if (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY))
+ while ((msm_read(port, UART_ISR) & bits) != bits)
+ cpu_relax();
+}
+
static void msm_stop_tx(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
@@ -73,6 +86,61 @@ static void msm_enable_ms(struct uart_port *port)
msm_write(port, msm_port->imr, UART_IMR);
}
+static void handle_rx_dm(struct uart_port *port, unsigned int misr)
+{
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int sr;
+ int count = 0;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ port->icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ }
+
+ if (misr & UART_IMR_RXSTALE) {
+ count = msm_read(port, UARTDM_RX_TOTAL_SNAP) -
+ msm_port->old_snap_state;
+ msm_port->old_snap_state = 0;
+ } else {
+ count = 4 * (msm_read(port, UART_RFWR));
+ msm_port->old_snap_state += count;
+ }
+
+ /* TODO: Precise error reporting */
+
+ port->icount.rx += count;
+
+ while (count > 0) {
+ unsigned int c;
+
+ sr = msm_read(port, UART_SR);
+ if ((sr & UART_SR_RX_READY) == 0) {
+ msm_port->old_snap_state -= count;
+ break;
+ }
+ c = msm_read(port, UARTDM_RF);
+ if (sr & UART_SR_RX_BREAK) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (sr & UART_SR_PAR_FRAME_ERR)
+ port->icount.frame++;
+
+ /* TODO: handle sysrq */
+ tty_insert_flip_string(tty, (char *) &c,
+ (count > 4) ? 4 : count);
+ count -= 4;
+ }
+
+ tty_flip_buffer_push(tty);
+ if (misr & (UART_IMR_RXSTALE))
+ msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, 0xFFFFFF, UARTDM_DMRX);
+ msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+}
+
static void handle_rx(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
@@ -121,6 +189,12 @@ static void handle_rx(struct uart_port *port)
tty_flip_buffer_push(tty);
}
+static void reset_dm_count(struct uart_port *port)
+{
+ wait_for_xmitr(port, UART_ISR_TX_READY);
+ msm_write(port, 1, UARTDM_NCF_TX);
+}
+
static void handle_tx(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
@@ -128,11 +202,18 @@ static void handle_tx(struct uart_port *port)
int sent_tx;
if (port->x_char) {
- msm_write(port, port->x_char, UART_TF);
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
+
+ msm_write(port, port->x_char,
+ msm_port->is_uartdm ? UARTDM_TF : UART_TF);
port->icount.tx++;
port->x_char = 0;
}
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
+
while (msm_read(port, UART_SR) & UART_SR_TX_READY) {
if (uart_circ_empty(xmit)) {
/* disable tx interrupts */
@@ -140,8 +221,11 @@ static void handle_tx(struct uart_port *port)
msm_write(port, msm_port->imr, UART_IMR);
break;
}
+ msm_write(port, xmit->buf[xmit->tail],
+ msm_port->is_uartdm ? UARTDM_TF : UART_TF);
- msm_write(port, xmit->buf[xmit->tail], UART_TF);
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
@@ -169,8 +253,12 @@ static irqreturn_t msm_irq(int irq, void *dev_id)
misr = msm_read(port, UART_MISR);
msm_write(port, 0, UART_IMR); /* disable interrupt */
- if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE))
- handle_rx(port);
+ if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
+ if (msm_port->is_uartdm)
+ handle_rx_dm(port, misr);
+ else
+ handle_rx(port);
+ }
if (misr & UART_IMR_TXLEV)
handle_tx(port);
if (misr & UART_IMR_DELTA_CTS)
@@ -192,10 +280,21 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS;
}
-static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
+
+static void msm_reset(struct uart_port *port)
{
- unsigned int mr;
+ /* reset everything */
+ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+}
+void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ unsigned int mr;
mr = msm_read(port, UART_MR1);
if (!(mctrl & TIOCM_RTS)) {
@@ -219,6 +318,7 @@ static void msm_break_ctl(struct uart_port *port, int break_ctl)
static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
{
unsigned int baud_code, rxstale, watermark;
+ struct msm_port *msm_port = UART_TO_MSM(port);
switch (baud) {
case 300:
@@ -273,6 +373,9 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
break;
}
+ if (msm_port->is_uartdm)
+ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
+
msm_write(port, baud_code, UART_CSR);
/* RX stale watermark */
@@ -288,25 +391,23 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
/* set TX watermark */
msm_write(port, 10, UART_TFWR);
+ if (msm_port->is_uartdm) {
+ msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, 0xFFFFFF, UARTDM_DMRX);
+ msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ }
+
return baud;
}
-static void msm_reset(struct uart_port *port)
-{
- /* reset everything */
- msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
- msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
-}
static void msm_init_clock(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
clk_enable(msm_port->clk);
+ if (!IS_ERR(msm_port->pclk))
+ clk_enable(msm_port->pclk);
msm_serial_set_mnd_regs(port);
}
@@ -347,15 +448,31 @@ static int msm_startup(struct uart_port *port)
msm_write(port, data, UART_IPR);
}
- msm_reset(port);
+ data = 0;
+ if (!port->cons || (port->cons && !(port->cons->flags & CON_ENABLED))) {
+ msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
+ msm_reset(port);
+ data = UART_CR_TX_ENABLE;
+ }
+
+ data |= UART_CR_RX_ENABLE;
+ msm_write(port, data, UART_CR); /* enable TX & RX */
- msm_write(port, 0x05, UART_CR); /* enable TX & RX */
+ /* Make sure IPR is not 0 to start with */
+ if (msm_port->is_uartdm)
+ msm_write(port, UART_IPR_STALE_LSB, UART_IPR);
/* turn on RX and CTS interrupts */
msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
UART_IMR_CURRENT_CTS;
- msm_write(port, msm_port->imr, UART_IMR);
+ if (msm_port->is_uartdm) {
+ msm_write(port, 0xFFFFFF, UARTDM_DMRX);
+ msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ }
+
+ msm_write(port, msm_port->imr, UART_IMR);
return 0;
}
@@ -384,7 +501,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
baud = msm_set_baud_rate(port, baud);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
-
+
/* calculate parity */
mr = msm_read(port, UART_MR2);
mr &= ~UART_MR2_PARITY_MODE;
@@ -454,48 +571,105 @@ static const char *msm_type(struct uart_port *port)
static void msm_release_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
- struct resource *resource;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ struct resource *uart_resource;
+ struct resource *gsbi_resource;
resource_size_t size;
- resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(!resource))
+ uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!uart_resource))
return;
- size = resource->end - resource->start + 1;
+ size = resource_size(uart_resource);
release_mem_region(port->mapbase, size);
iounmap(port->membase);
port->membase = NULL;
+
+ if (msm_port->gsbi_base) {
+ iowrite32(GSBI_PROTOCOL_IDLE, msm_port->gsbi_base +
+ GSBI_CONTROL);
+
+ gsbi_resource = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "gsbi_resource");
+
+ if (unlikely(!gsbi_resource))
+ return;
+
+ size = resource_size(gsbi_resource);
+ release_mem_region(gsbi_resource->start, size);
+ iounmap(msm_port->gsbi_base);
+ msm_port->gsbi_base = NULL;
+ }
}
static int msm_request_port(struct uart_port *port)
{
+ struct msm_port *msm_port = UART_TO_MSM(port);
struct platform_device *pdev = to_platform_device(port->dev);
- struct resource *resource;
+ struct resource *uart_resource;
+ struct resource *gsbi_resource;
resource_size_t size;
+ int ret;
- resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(!resource))
+ uart_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "uart_resource");
+ if (unlikely(!uart_resource))
return -ENXIO;
- size = resource->end - resource->start + 1;
- if (unlikely(!request_mem_region(port->mapbase, size, "msm_serial")))
+ size = resource_size(uart_resource);
+
+ if (!request_mem_region(port->mapbase, size, "msm_serial"))
return -EBUSY;
port->membase = ioremap(port->mapbase, size);
if (!port->membase) {
- release_mem_region(port->mapbase, size);
- return -EBUSY;
+ ret = -EBUSY;
+ goto fail_release_port;
+ }
+
+ gsbi_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "gsbi_resource");
+ /* Is this a GSBI-based port? */
+ if (gsbi_resource) {
+ size = resource_size(gsbi_resource);
+
+ if (!request_mem_region(gsbi_resource->start, size,
+ "msm_serial")) {
+ ret = -EBUSY;
+ goto fail_release_port;
+ }
+
+ msm_port->gsbi_base = ioremap(gsbi_resource->start, size);
+ if (!msm_port->gsbi_base) {
+ ret = -EBUSY;
+ goto fail_release_gsbi;
+ }
}
return 0;
+
+fail_release_gsbi:
+ release_mem_region(gsbi_resource->start, size);
+fail_release_port:
+ release_mem_region(port->mapbase, size);
+ return ret;
}
static void msm_config_port(struct uart_port *port, int flags)
{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ int ret;
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_MSM;
- msm_request_port(port);
+ ret = msm_request_port(port);
+ if (ret)
+ return;
}
+
+ if (msm_port->is_uartdm)
+ iowrite32(GSBI_PROTOCOL_UART, msm_port->gsbi_base +
+ GSBI_CONTROL);
}
static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
@@ -515,9 +689,13 @@ static void msm_power(struct uart_port *port, unsigned int state,
switch (state) {
case 0:
clk_enable(msm_port->clk);
+ if (!IS_ERR(msm_port->pclk))
+ clk_enable(msm_port->pclk);
break;
case 3:
clk_disable(msm_port->clk);
+ if (!IS_ERR(msm_port->pclk))
+ clk_disable(msm_port->pclk);
break;
default:
printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state);
@@ -550,7 +728,7 @@ static struct msm_port msm_uart_ports[] = {
.iotype = UPIO_MEM,
.ops = &msm_uart_pops,
.flags = UPF_BOOT_AUTOCONF,
- .fifosize = 512,
+ .fifosize = 64,
.line = 0,
},
},
@@ -559,7 +737,7 @@ static struct msm_port msm_uart_ports[] = {
.iotype = UPIO_MEM,
.ops = &msm_uart_pops,
.flags = UPF_BOOT_AUTOCONF,
- .fifosize = 512,
+ .fifosize = 64,
.line = 1,
},
},
@@ -585,9 +763,14 @@ static inline struct uart_port *get_port_from_line(unsigned int line)
static void msm_console_putchar(struct uart_port *port, int c)
{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
+
while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
;
- msm_write(port, c, UART_TF);
+ msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
}
static void msm_console_write(struct console *co, const char *s,
@@ -609,12 +792,14 @@ static void msm_console_write(struct console *co, const char *s,
static int __init msm_console_setup(struct console *co, char *options)
{
struct uart_port *port;
+ struct msm_port *msm_port;
int baud, flow, bits, parity;
if (unlikely(co->index >= UART_NR || co->index < 0))
return -ENXIO;
port = get_port_from_line(co->index);
+ msm_port = UART_TO_MSM(port);
if (unlikely(!port->membase))
return -ENXIO;
@@ -638,6 +823,11 @@ static int __init msm_console_setup(struct console *co, char *options)
msm_reset(port);
+ if (msm_port->is_uartdm) {
+ msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
+ msm_write(port, UART_CR_TX_ENABLE, UART_CR);
+ }
+
printk(KERN_INFO "msm_serial: console setup on port #%d\n", port->line);
return uart_set_options(port, co, baud, parity, bits, flow);
@@ -685,14 +875,32 @@ static int __init msm_serial_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
msm_port = UART_TO_MSM(port);
- msm_port->clk = clk_get(&pdev->dev, "uart_clk");
- if (IS_ERR(msm_port->clk))
- return PTR_ERR(msm_port->clk);
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsbi_resource"))
+ msm_port->is_uartdm = 1;
+ else
+ msm_port->is_uartdm = 0;
+
+ if (msm_port->is_uartdm) {
+ msm_port->clk = clk_get(&pdev->dev, "gsbi_uart_clk");
+ msm_port->pclk = clk_get(&pdev->dev, "gsbi_pclk");
+ } else {
+ msm_port->clk = clk_get(&pdev->dev, "uart_clk");
+ msm_port->pclk = ERR_PTR(-ENOENT);
+ }
+
+ if (unlikely(IS_ERR(msm_port->clk) || (IS_ERR(msm_port->pclk) &&
+ msm_port->is_uartdm)))
+ return PTR_ERR(msm_port->clk);
+
+ if (msm_port->is_uartdm)
+ clk_set_rate(msm_port->clk, 7372800);
+
port->uartclk = clk_get_rate(msm_port->clk);
printk(KERN_INFO "uartclk = %d\n", port->uartclk);
- resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "uart_resource");
if (unlikely(!resource))
return -ENXIO;
port->mapbase = resource->start;
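The UARTDM receive handler added above reads the RX FIFO one 32-bit word at a time while the valid byte count comes from the RX_TOTAL_SNAP snapshot, so the last word may carry fewer than four usable bytes. This user-space sketch models just that unpacking step under those assumptions; read_rx_word() and tty_push() are stand-ins, not the driver's register accessors.

/* Build stand-alone: cc -std=c99 uartdm_rx.c */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for a 32-bit read of UARTDM_RF. */
static uint32_t read_rx_word(const uint8_t **src)
{
	uint32_t w;

	memcpy(&w, *src, sizeof(w));
	*src += sizeof(w);
	return w;
}

/* Stand-in for tty_insert_flip_string() + tty_flip_buffer_push(). */
static void tty_push(const uint8_t *bytes, int len)
{
	fwrite(bytes, 1, len, stdout);
}

static void drain_rx(const uint8_t *fifo, int count)
{
	while (count > 0) {
		uint32_t word = read_rx_word(&fifo);
		int chunk = count > 4 ? 4 : count;	/* partial final word */

		tty_push((const uint8_t *)&word, chunk);
		count -= 4;
	}
}

int main(void)
{
	const uint8_t fifo[] = "hello, uartdm\n\0\0";	/* padded to word size */

	drain_rx(fifo, 14);				/* 14 valid bytes reported */
	return 0;
}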
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index f6ca9ca79e9..9b8dc5d0d85 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -3,6 +3,7 @@
*
* Copyright (C) 2007 Google, Inc.
* Author: Robert Love <rlove@google.com>
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -54,6 +55,7 @@
#define UART_CSR_300 0x22
#define UART_TF 0x000C
+#define UARTDM_TF 0x0070
#define UART_CR 0x0010
#define UART_CR_CMD_NULL (0 << 4)
@@ -64,14 +66,17 @@
#define UART_CR_CMD_START_BREAK (5 << 4)
#define UART_CR_CMD_STOP_BREAK (6 << 4)
#define UART_CR_CMD_RESET_CTS (7 << 4)
+#define UART_CR_CMD_RESET_STALE_INT (8 << 4)
#define UART_CR_CMD_PACKET_MODE (9 << 4)
#define UART_CR_CMD_MODE_RESET (12 << 4)
#define UART_CR_CMD_SET_RFR (13 << 4)
#define UART_CR_CMD_RESET_RFR (14 << 4)
+#define UART_CR_CMD_PROTECTION_EN (16 << 4)
+#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
#define UART_CR_TX_DISABLE (1 << 3)
-#define UART_CR_TX_ENABLE (1 << 3)
-#define UART_CR_RX_DISABLE (1 << 3)
-#define UART_CR_RX_ENABLE (1 << 3)
+#define UART_CR_TX_ENABLE (1 << 2)
+#define UART_CR_RX_DISABLE (1 << 1)
+#define UART_CR_RX_ENABLE (1 << 0)
#define UART_IMR 0x0014
#define UART_IMR_TXLEV (1 << 0)
@@ -110,9 +115,20 @@
#define UART_SR_RX_FULL (1 << 1)
#define UART_SR_RX_READY (1 << 0)
-#define UART_RF 0x000C
-#define UART_MISR 0x0010
-#define UART_ISR 0x0014
+#define UART_RF 0x000C
+#define UARTDM_RF 0x0070
+#define UART_MISR 0x0010
+#define UART_ISR 0x0014
+#define UART_ISR_TX_READY (1 << 7)
+
+#define GSBI_CONTROL 0x0
+#define GSBI_PROTOCOL_CODE 0x30
+#define GSBI_PROTOCOL_UART 0x40
+#define GSBI_PROTOCOL_IDLE 0x0
+
+#define UARTDM_DMRX 0x34
+#define UARTDM_NCF_TX 0x40
+#define UARTDM_RX_TOTAL_SNAP 0x38
#define UART_TO_MSM(uart_port) ((struct msm_port *) uart_port)
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
new file mode 100644
index 00000000000..7e02c9c344f
--- /dev/null
+++ b/drivers/tty/serial/mxs-auart.c
@@ -0,0 +1,798 @@
+/*
+ * Freescale STMP37XX/STMP378X Application UART driver
+ *
+ * Author: dmitry pervushin <dimka@embeddedalley.com>
+ *
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+
+#define MXS_AUART_PORTS 5
+
+#define AUART_CTRL0 0x00000000
+#define AUART_CTRL0_SET 0x00000004
+#define AUART_CTRL0_CLR 0x00000008
+#define AUART_CTRL0_TOG 0x0000000c
+#define AUART_CTRL1 0x00000010
+#define AUART_CTRL1_SET 0x00000014
+#define AUART_CTRL1_CLR 0x00000018
+#define AUART_CTRL1_TOG 0x0000001c
+#define AUART_CTRL2 0x00000020
+#define AUART_CTRL2_SET 0x00000024
+#define AUART_CTRL2_CLR 0x00000028
+#define AUART_CTRL2_TOG 0x0000002c
+#define AUART_LINECTRL 0x00000030
+#define AUART_LINECTRL_SET 0x00000034
+#define AUART_LINECTRL_CLR 0x00000038
+#define AUART_LINECTRL_TOG 0x0000003c
+#define AUART_LINECTRL2 0x00000040
+#define AUART_LINECTRL2_SET 0x00000044
+#define AUART_LINECTRL2_CLR 0x00000048
+#define AUART_LINECTRL2_TOG 0x0000004c
+#define AUART_INTR 0x00000050
+#define AUART_INTR_SET 0x00000054
+#define AUART_INTR_CLR 0x00000058
+#define AUART_INTR_TOG 0x0000005c
+#define AUART_DATA 0x00000060
+#define AUART_STAT 0x00000070
+#define AUART_DEBUG 0x00000080
+#define AUART_VERSION 0x00000090
+#define AUART_AUTOBAUD 0x000000a0
+
+#define AUART_CTRL0_SFTRST (1 << 31)
+#define AUART_CTRL0_CLKGATE (1 << 30)
+
+#define AUART_CTRL2_CTSEN (1 << 15)
+#define AUART_CTRL2_RTS (1 << 11)
+#define AUART_CTRL2_RXE (1 << 9)
+#define AUART_CTRL2_TXE (1 << 8)
+#define AUART_CTRL2_UARTEN (1 << 0)
+
+#define AUART_LINECTRL_BAUD_DIVINT_SHIFT 16
+#define AUART_LINECTRL_BAUD_DIVINT_MASK 0xffff0000
+#define AUART_LINECTRL_BAUD_DIVINT(v) (((v) & 0xffff) << 16)
+#define AUART_LINECTRL_BAUD_DIVFRAC_SHIFT 8
+#define AUART_LINECTRL_BAUD_DIVFRAC_MASK 0x00003f00
+#define AUART_LINECTRL_BAUD_DIVFRAC(v) (((v) & 0x3f) << 8)
+#define AUART_LINECTRL_WLEN_MASK 0x00000060
+#define AUART_LINECTRL_WLEN(v) (((v) & 0x3) << 5)
+#define AUART_LINECTRL_FEN (1 << 4)
+#define AUART_LINECTRL_STP2 (1 << 3)
+#define AUART_LINECTRL_EPS (1 << 2)
+#define AUART_LINECTRL_PEN (1 << 1)
+#define AUART_LINECTRL_BRK (1 << 0)
+
+#define AUART_INTR_RTIEN (1 << 22)
+#define AUART_INTR_TXIEN (1 << 21)
+#define AUART_INTR_RXIEN (1 << 20)
+#define AUART_INTR_CTSMIEN (1 << 17)
+#define AUART_INTR_RTIS (1 << 6)
+#define AUART_INTR_TXIS (1 << 5)
+#define AUART_INTR_RXIS (1 << 4)
+#define AUART_INTR_CTSMIS (1 << 1)
+
+#define AUART_STAT_BUSY (1 << 29)
+#define AUART_STAT_CTS (1 << 28)
+#define AUART_STAT_TXFE (1 << 27)
+#define AUART_STAT_TXFF (1 << 25)
+#define AUART_STAT_RXFE (1 << 24)
+#define AUART_STAT_OERR (1 << 19)
+#define AUART_STAT_BERR (1 << 18)
+#define AUART_STAT_PERR (1 << 17)
+#define AUART_STAT_FERR (1 << 16)
+
+static struct uart_driver auart_driver;
+
+struct mxs_auart_port {
+ struct uart_port port;
+
+ unsigned int flags;
+ unsigned int ctrl;
+
+ unsigned int irq;
+
+ struct clk *clk;
+ struct device *dev;
+};
+
+static void mxs_auart_stop_tx(struct uart_port *u);
+
+#define to_auart_port(u) container_of(u, struct mxs_auart_port, port)
+
+static inline void mxs_auart_tx_chars(struct mxs_auart_port *s)
+{
+ struct circ_buf *xmit = &s->port.state->xmit;
+
+ while (!(readl(s->port.membase + AUART_STAT) &
+ AUART_STAT_TXFF)) {
+ if (s->port.x_char) {
+ s->port.icount.tx++;
+ writel(s->port.x_char,
+ s->port.membase + AUART_DATA);
+ s->port.x_char = 0;
+ continue;
+ }
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)) {
+ s->port.icount.tx++;
+ writel(xmit->buf[xmit->tail],
+ s->port.membase + AUART_DATA);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&s->port);
+ } else
+ break;
+ }
+ if (uart_circ_empty(&(s->port.state->xmit)))
+ writel(AUART_INTR_TXIEN,
+ s->port.membase + AUART_INTR_CLR);
+ else
+ writel(AUART_INTR_TXIEN,
+ s->port.membase + AUART_INTR_SET);
+
+ if (uart_tx_stopped(&s->port))
+ mxs_auart_stop_tx(&s->port);
+}
+
+static void mxs_auart_rx_char(struct mxs_auart_port *s)
+{
+ int flag;
+ u32 stat;
+ u8 c;
+
+ c = readl(s->port.membase + AUART_DATA);
+ stat = readl(s->port.membase + AUART_STAT);
+
+ flag = TTY_NORMAL;
+ s->port.icount.rx++;
+
+ if (stat & AUART_STAT_BERR) {
+ s->port.icount.brk++;
+ if (uart_handle_break(&s->port))
+ goto out;
+ } else if (stat & AUART_STAT_PERR) {
+ s->port.icount.parity++;
+ } else if (stat & AUART_STAT_FERR) {
+ s->port.icount.frame++;
+ }
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ stat &= s->port.read_status_mask;
+
+ if (stat & AUART_STAT_BERR) {
+ flag = TTY_BREAK;
+ } else if (stat & AUART_STAT_PERR)
+ flag = TTY_PARITY;
+ else if (stat & AUART_STAT_FERR)
+ flag = TTY_FRAME;
+
+ if (stat & AUART_STAT_OERR)
+ s->port.icount.overrun++;
+
+ if (uart_handle_sysrq_char(&s->port, c))
+ goto out;
+
+ uart_insert_char(&s->port, stat, AUART_STAT_OERR, c, flag);
+out:
+ writel(stat, s->port.membase + AUART_STAT);
+}
+
+static void mxs_auart_rx_chars(struct mxs_auart_port *s)
+{
+ struct tty_struct *tty = s->port.state->port.tty;
+ u32 stat = 0;
+
+ for (;;) {
+ stat = readl(s->port.membase + AUART_STAT);
+ if (stat & AUART_STAT_RXFE)
+ break;
+ mxs_auart_rx_char(s);
+ }
+
+ writel(stat, s->port.membase + AUART_STAT);
+ tty_flip_buffer_push(tty);
+}
+
+static int mxs_auart_request_port(struct uart_port *u)
+{
+ return 0;
+}
+
+static int mxs_auart_verify_port(struct uart_port *u,
+ struct serial_struct *ser)
+{
+ if (u->type != PORT_UNKNOWN && u->type != PORT_IMX)
+ return -EINVAL;
+ return 0;
+}
+
+static void mxs_auart_config_port(struct uart_port *u, int flags)
+{
+}
+
+static const char *mxs_auart_type(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ return dev_name(s->dev);
+}
+
+static void mxs_auart_release_port(struct uart_port *u)
+{
+}
+
+static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ u32 ctrl = readl(u->membase + AUART_CTRL2);
+
+ ctrl &= ~AUART_CTRL2_RTS;
+ if (mctrl & TIOCM_RTS)
+ ctrl |= AUART_CTRL2_RTS;
+ s->ctrl = mctrl;
+ writel(ctrl, u->membase + AUART_CTRL2);
+}
+
+static u32 mxs_auart_get_mctrl(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+ u32 stat = readl(u->membase + AUART_STAT);
+ int ctrl2 = readl(u->membase + AUART_CTRL2);
+ u32 mctrl = s->ctrl;
+
+ mctrl &= ~TIOCM_CTS;
+ if (stat & AUART_STAT_CTS)
+ mctrl |= TIOCM_CTS;
+
+ if (ctrl2 & AUART_CTRL2_RTS)
+ mctrl |= TIOCM_RTS;
+
+ return mctrl;
+}
+
+static void mxs_auart_settermios(struct uart_port *u,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ u32 bm, ctrl, ctrl2, div;
+ unsigned int cflag, baud;
+
+ cflag = termios->c_cflag;
+
+ ctrl = AUART_LINECTRL_FEN;
+ ctrl2 = readl(u->membase + AUART_CTRL2);
+
+ /* byte size */
+ switch (cflag & CSIZE) {
+ case CS5:
+ bm = 0;
+ break;
+ case CS6:
+ bm = 1;
+ break;
+ case CS7:
+ bm = 2;
+ break;
+ case CS8:
+ bm = 3;
+ break;
+ default:
+ return;
+ }
+
+ ctrl |= AUART_LINECTRL_WLEN(bm);
+
+ /* parity */
+ if (cflag & PARENB) {
+ ctrl |= AUART_LINECTRL_PEN;
+ if ((cflag & PARODD) == 0)
+ ctrl |= AUART_LINECTRL_EPS;
+ }
+
+ u->read_status_mask = 0;
+
+ if (termios->c_iflag & INPCK)
+ u->read_status_mask |= AUART_STAT_PERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ u->read_status_mask |= AUART_STAT_BERR;
+
+ /*
+ * Characters to ignore
+ */
+ u->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ u->ignore_status_mask |= AUART_STAT_PERR;
+ if (termios->c_iflag & IGNBRK) {
+ u->ignore_status_mask |= AUART_STAT_BERR;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ u->ignore_status_mask |= AUART_STAT_OERR;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if (cflag & CREAD)
+ ctrl2 |= AUART_CTRL2_RXE;
+ else
+ ctrl2 &= ~AUART_CTRL2_RXE;
+
+ /* figure out the stop bits requested */
+ if (cflag & CSTOPB)
+ ctrl |= AUART_LINECTRL_STP2;
+
+ /* figure out the hardware flow control settings */
+ if (cflag & CRTSCTS)
+ ctrl2 |= AUART_CTRL2_CTSEN;
+ else
+ ctrl2 &= ~AUART_CTRL2_CTSEN;
+
+ /* set baud rate */
+ baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk);
+ div = u->uartclk * 32 / baud;
+ ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
+ ctrl |= AUART_LINECTRL_BAUD_DIVINT(div >> 6);
+
+ writel(ctrl, u->membase + AUART_LINECTRL);
+ writel(ctrl2, u->membase + AUART_CTRL2);
+}
+
+static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+{
+ u32 istatus, istat;
+ struct mxs_auart_port *s = context;
+ u32 stat = readl(s->port.membase + AUART_STAT);
+
+ istatus = istat = readl(s->port.membase + AUART_INTR);
+
+ if (istat & AUART_INTR_CTSMIS) {
+ uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS);
+ writel(AUART_INTR_CTSMIS,
+ s->port.membase + AUART_INTR_CLR);
+ istat &= ~AUART_INTR_CTSMIS;
+ }
+
+ if (istat & (AUART_INTR_RTIS | AUART_INTR_RXIS)) {
+ mxs_auart_rx_chars(s);
+ istat &= ~(AUART_INTR_RTIS | AUART_INTR_RXIS);
+ }
+
+ if (istat & AUART_INTR_TXIS) {
+ mxs_auart_tx_chars(s);
+ istat &= ~AUART_INTR_TXIS;
+ }
+
+ writel(istatus & (AUART_INTR_RTIS
+ | AUART_INTR_TXIS
+ | AUART_INTR_RXIS
+ | AUART_INTR_CTSMIS),
+ s->port.membase + AUART_INTR_CLR);
+
+ return IRQ_HANDLED;
+}
+
+static void mxs_auart_reset(struct uart_port *u)
+{
+ int i;
+ unsigned int reg;
+
+ writel(AUART_CTRL0_SFTRST, u->membase + AUART_CTRL0_CLR);
+
+ for (i = 0; i < 10000; i++) {
+ reg = readl(u->membase + AUART_CTRL0);
+ if (!(reg & AUART_CTRL0_SFTRST))
+ break;
+ udelay(3);
+ }
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+}
+
+static int mxs_auart_startup(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ clk_enable(s->clk);
+
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+
+ writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_SET);
+
+ writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
+ u->membase + AUART_INTR);
+
+ /*
+ * Enable the FIFO so all four bytes of a DMA word are written to
+ * the output (otherwise, only the LSB is written, i.e. 1 in 4 bytes)
+ */
+ writel(AUART_LINECTRL_FEN, u->membase + AUART_LINECTRL_SET);
+
+ return 0;
+}
+
+static void mxs_auart_shutdown(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_CLR);
+
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_SET);
+
+ writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
+ u->membase + AUART_INTR_CLR);
+
+ clk_disable(s->clk);
+}
+
+static unsigned int mxs_auart_tx_empty(struct uart_port *u)
+{
+ if (readl(u->membase + AUART_STAT) & AUART_STAT_TXFE)
+ return TIOCSER_TEMT;
+ else
+ return 0;
+}
+
+static void mxs_auart_start_tx(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ /* enable transmitter */
+ writel(AUART_CTRL2_TXE, u->membase + AUART_CTRL2_SET);
+
+ mxs_auart_tx_chars(s);
+}
+
+static void mxs_auart_stop_tx(struct uart_port *u)
+{
+ writel(AUART_CTRL2_TXE, u->membase + AUART_CTRL2_CLR);
+}
+
+static void mxs_auart_stop_rx(struct uart_port *u)
+{
+ writel(AUART_CTRL2_RXE, u->membase + AUART_CTRL2_CLR);
+}
+
+static void mxs_auart_break_ctl(struct uart_port *u, int ctl)
+{
+ if (ctl)
+ writel(AUART_LINECTRL_BRK,
+ u->membase + AUART_LINECTRL_SET);
+ else
+ writel(AUART_LINECTRL_BRK,
+ u->membase + AUART_LINECTRL_CLR);
+}
+
+static void mxs_auart_enable_ms(struct uart_port *port)
+{
+ /* just empty */
+}
+
+static struct uart_ops mxs_auart_ops = {
+ .tx_empty = mxs_auart_tx_empty,
+ .start_tx = mxs_auart_start_tx,
+ .stop_tx = mxs_auart_stop_tx,
+ .stop_rx = mxs_auart_stop_rx,
+ .enable_ms = mxs_auart_enable_ms,
+ .break_ctl = mxs_auart_break_ctl,
+ .set_mctrl = mxs_auart_set_mctrl,
+ .get_mctrl = mxs_auart_get_mctrl,
+ .startup = mxs_auart_startup,
+ .shutdown = mxs_auart_shutdown,
+ .set_termios = mxs_auart_settermios,
+ .type = mxs_auart_type,
+ .release_port = mxs_auart_release_port,
+ .request_port = mxs_auart_request_port,
+ .config_port = mxs_auart_config_port,
+ .verify_port = mxs_auart_verify_port,
+};
+
+static struct mxs_auart_port *auart_port[MXS_AUART_PORTS];
+
+#ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE
+static void mxs_auart_console_putchar(struct uart_port *port, int ch)
+{
+ unsigned int to = 1000;
+
+ while (readl(port->membase + AUART_STAT) & AUART_STAT_TXFF) {
+ if (!to--)
+ break;
+ udelay(1);
+ }
+
+ writel(ch, port->membase + AUART_DATA);
+}
+
+static void
+auart_console_write(struct console *co, const char *str, unsigned int count)
+{
+ struct mxs_auart_port *s;
+ struct uart_port *port;
+ unsigned int old_ctrl0, old_ctrl2;
+ unsigned int to = 1000;
+
+ if (co->index > MXS_AUART_PORTS || co->index < 0)
+ return;
+
+ s = auart_port[co->index];
+ port = &s->port;
+
+ clk_enable(s->clk);
+
+ /* First save the CR then disable the interrupts */
+ old_ctrl2 = readl(port->membase + AUART_CTRL2);
+ old_ctrl0 = readl(port->membase + AUART_CTRL0);
+
+ writel(AUART_CTRL0_CLKGATE,
+ port->membase + AUART_CTRL0_CLR);
+ writel(AUART_CTRL2_UARTEN | AUART_CTRL2_TXE,
+ port->membase + AUART_CTRL2_SET);
+
+ uart_console_write(port, str, count, mxs_auart_console_putchar);
+
+ /*
+ * Finally, wait for the transmitter to become empty
+ * and restore the control registers
+ */
+ while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) {
+ if (!to--)
+ break;
+ udelay(1);
+ }
+
+ writel(old_ctrl0, port->membase + AUART_CTRL0);
+ writel(old_ctrl2, port->membase + AUART_CTRL2);
+
+ clk_disable(s->clk);
+}
+
+static void __init
+auart_console_get_options(struct uart_port *port, int *baud,
+ int *parity, int *bits)
+{
+ unsigned int lcr_h, quot;
+
+ if (!(readl(port->membase + AUART_CTRL2) & AUART_CTRL2_UARTEN))
+ return;
+
+ lcr_h = readl(port->membase + AUART_LINECTRL);
+
+ *parity = 'n';
+ if (lcr_h & AUART_LINECTRL_PEN) {
+ if (lcr_h & AUART_LINECTRL_EPS)
+ *parity = 'e';
+ else
+ *parity = 'o';
+ }
+
+ if ((lcr_h & AUART_LINECTRL_WLEN_MASK) == AUART_LINECTRL_WLEN(2))
+ *bits = 7;
+ else
+ *bits = 8;
+
+ quot = ((readl(port->membase + AUART_LINECTRL)
+ & AUART_LINECTRL_BAUD_DIVINT_MASK))
+ >> (AUART_LINECTRL_BAUD_DIVINT_SHIFT - 6);
+ quot |= ((readl(port->membase + AUART_LINECTRL)
+ & AUART_LINECTRL_BAUD_DIVFRAC_MASK))
+ >> AUART_LINECTRL_BAUD_DIVFRAC_SHIFT;
+ if (quot == 0)
+ quot = 1;
+
+ *baud = (port->uartclk << 2) / quot;
+}
+
+static int __init
+auart_console_setup(struct console *co, char *options)
+{
+ struct mxs_auart_port *s;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index == -1 || co->index >= ARRAY_SIZE(auart_port))
+ co->index = 0;
+ s = auart_port[co->index];
+ if (!s)
+ return -ENODEV;
+
+ clk_enable(s->clk);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ auart_console_get_options(&s->port, &baud, &parity, &bits);
+
+ ret = uart_set_options(&s->port, co, baud, parity, bits, flow);
+
+ clk_disable(s->clk);
+
+ return ret;
+}
+
+static struct console auart_console = {
+ .name = "ttyAPP",
+ .write = auart_console_write,
+ .device = uart_console_device,
+ .setup = auart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &auart_driver,
+};
+#endif
+
+static struct uart_driver auart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyAPP",
+ .dev_name = "ttyAPP",
+ .major = 0,
+ .minor = 0,
+ .nr = MXS_AUART_PORTS,
+#ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE
+ .cons = &auart_console,
+#endif
+};
+
+static int __devinit mxs_auart_probe(struct platform_device *pdev)
+{
+ struct mxs_auart_port *s;
+ u32 version;
+ int ret = 0;
+ struct resource *r;
+
+ s = kzalloc(sizeof(struct mxs_auart_port), GFP_KERNEL);
+ if (!s) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ s->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(s->clk)) {
+ ret = PTR_ERR(s->clk);
+ goto out_free;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ ret = -ENXIO;
+ goto out_free_clk;
+ }
+
+ s->port.mapbase = r->start;
+ s->port.membase = ioremap(r->start, resource_size(r));
+ s->port.ops = &mxs_auart_ops;
+ s->port.iotype = UPIO_MEM;
+ s->port.line = pdev->id < 0 ? 0 : pdev->id;
+ s->port.fifosize = 16;
+ s->port.uartclk = clk_get_rate(s->clk);
+ s->port.type = PORT_IMX;
+ s->port.dev = s->dev = get_device(&pdev->dev);
+
+ s->flags = 0;
+ s->ctrl = 0;
+
+ s->irq = platform_get_irq(pdev, 0);
+ s->port.irq = s->irq;
+ ret = request_irq(s->irq, mxs_auart_irq_handle, 0, dev_name(&pdev->dev), s);
+ if (ret)
+ goto out_free_clk;
+
+ platform_set_drvdata(pdev, s);
+
+ auart_port[pdev->id] = s;
+
+ mxs_auart_reset(&s->port);
+
+ ret = uart_add_one_port(&auart_driver, &s->port);
+ if (ret)
+ goto out_free_irq;
+
+ version = readl(s->port.membase + AUART_VERSION);
+ dev_info(&pdev->dev, "Found APPUART %d.%d.%d\n",
+ (version >> 24) & 0xff,
+ (version >> 16) & 0xff, version & 0xffff);
+
+ return 0;
+
+out_free_irq:
+ auart_port[pdev->id] = NULL;
+ free_irq(s->irq, s);
+out_free_clk:
+ clk_put(s->clk);
+out_free:
+ kfree(s);
+out:
+ return ret;
+}
+
+static int __devexit mxs_auart_remove(struct platform_device *pdev)
+{
+ struct mxs_auart_port *s = platform_get_drvdata(pdev);
+
+ uart_remove_one_port(&auart_driver, &s->port);
+
+ auart_port[pdev->id] = NULL;
+
+ clk_put(s->clk);
+ free_irq(s->irq, s);
+ kfree(s);
+
+ return 0;
+}
+
+static struct platform_driver mxs_auart_driver = {
+ .probe = mxs_auart_probe,
+ .remove = __devexit_p(mxs_auart_remove),
+ .driver = {
+ .name = "mxs-auart",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mxs_auart_init(void)
+{
+ int r;
+
+ r = uart_register_driver(&auart_driver);
+ if (r)
+ goto out;
+
+ r = platform_driver_register(&mxs_auart_driver);
+ if (r)
+ goto out_err;
+
+ return 0;
+out_err:
+ uart_unregister_driver(&auart_driver);
+out:
+ return r;
+}
+
+static void __exit mxs_auart_exit(void)
+{
+ platform_driver_unregister(&mxs_auart_driver);
+ uart_unregister_driver(&auart_driver);
+}
+
+module_init(mxs_auart_init);
+module_exit(mxs_auart_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Freescale MXS application uart driver");
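mxs_auart_settermios() above derives the baud setting from a 32x-oversampled divisor, div = uartclk * 32 / baud, whose low 6 bits go into the BAUD_DIVFRAC field and the remainder into BAUD_DIVINT. The sketch below reproduces that split in isolation; the 24 MHz reference clock is only an assumed example value, not something stated in the patch.

/* Build stand-alone: cc -std=c99 auart_baud.c */
#include <stdint.h>
#include <stdio.h>

struct auart_div {
	uint32_t divint;	/* integer part, goes into BAUD_DIVINT */
	uint32_t divfrac;	/* 6-bit fractional part, goes into BAUD_DIVFRAC */
};

/* Split the 32x oversampled divisor exactly as the driver does. */
static struct auart_div auart_divisor(uint32_t uartclk, uint32_t baud)
{
	uint32_t div = uartclk * 32 / baud;

	return (struct auart_div){ .divint = div >> 6, .divfrac = div & 0x3f };
}

/* Recover the (rounded) baud rate from the two fields. */
static uint32_t auart_baud(uint32_t uartclk, struct auart_div d)
{
	return uartclk * 32 / ((d.divint << 6) | d.divfrac);
}

int main(void)
{
	uint32_t uartclk = 24000000;	/* assumed example reference clock */
	struct auart_div d = auart_divisor(uartclk, 115200);

	printf("divint=%u divfrac=%u -> ~%u baud\n",
	       (unsigned)d.divint, (unsigned)d.divfrac,
	       (unsigned)auart_baud(uartclk, d));
	return 0;
}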
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 5c7abe4c94d..6a18ca6ddaa 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -160,17 +160,17 @@ static int of_platform_serial_remove(struct platform_device *ofdev)
* A few common types, add more as needed.
*/
static struct of_device_id __devinitdata of_platform_serial_table[] = {
- { .type = "serial", .compatible = "ns8250", .data = (void *)PORT_8250, },
- { .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, },
- { .type = "serial", .compatible = "ns16550a", .data = (void *)PORT_16550A, },
- { .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, },
- { .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, },
- { .type = "serial", .compatible = "ns16850", .data = (void *)PORT_16850, },
+ { .compatible = "ns8250", .data = (void *)PORT_8250, },
+ { .compatible = "ns16450", .data = (void *)PORT_16450, },
+ { .compatible = "ns16550a", .data = (void *)PORT_16550A, },
+ { .compatible = "ns16550", .data = (void *)PORT_16550, },
+ { .compatible = "ns16750", .data = (void *)PORT_16750, },
+ { .compatible = "ns16850", .data = (void *)PORT_16850, },
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
- { .type = "serial", .compatible = "ibm,qpace-nwp-serial",
- .data = (void *)PORT_NWPSERIAL, },
+ { .compatible = "ibm,qpace-nwp-serial",
+ .data = (void *)PORT_NWPSERIAL, },
#endif
- { .type = "serial", .data = (void *)PORT_UNKNOWN, },
+ { .type = "serial", .data = (void *)PORT_UNKNOWN, },
{ /* end of list */ },
};
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 7f2f0105878..699b34446a5 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -20,6 +20,10 @@
* this driver as required for the omap-platform.
*/
+#if defined(CONFIG_SERIAL_OMAP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
@@ -190,7 +194,6 @@ static inline void receive_chars(struct uart_omap_port *up, int *status)
if (up->port.line == up->port.cons->index) {
/* Recover the break flag from console xmit */
lsr |= up->lsr_break_flag;
- up->lsr_break_flag = 0;
}
#endif
if (lsr & UART_LSR_BI)
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 99ac70e3255..b268e9fccb4 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -168,7 +168,6 @@ struct uea_softc {
union cmv_dsc cmv_dsc;
struct work_struct task;
- struct workqueue_struct *work_q;
u16 pageno;
u16 ovl;
@@ -1879,7 +1878,7 @@ static int uea_start_reset(struct uea_softc *sc)
/* start loading DSP */
sc->pageno = 0;
sc->ovl = 0;
- queue_work(sc->work_q, &sc->task);
+ schedule_work(&sc->task);
/* wait for modem ready CMV */
ret = wait_cmv_ack(sc);
@@ -2091,14 +2090,14 @@ static void uea_schedule_load_page_e1(struct uea_softc *sc,
{
sc->pageno = intr->e1_bSwapPageNo;
sc->ovl = intr->e1_bOvl >> 4 | intr->e1_bOvl << 4;
- queue_work(sc->work_q, &sc->task);
+ schedule_work(&sc->task);
}
static void uea_schedule_load_page_e4(struct uea_softc *sc,
struct intr_pkt *intr)
{
sc->pageno = intr->e4_bSwapPageNo;
- queue_work(sc->work_q, &sc->task);
+ schedule_work(&sc->task);
}
/*
@@ -2170,13 +2169,6 @@ static int uea_boot(struct uea_softc *sc)
init_waitqueue_head(&sc->sync_q);
- sc->work_q = create_workqueue("ueagle-dsp");
- if (!sc->work_q) {
- uea_err(INS_TO_USBDEV(sc), "cannot allocate workqueue\n");
- uea_leaves(INS_TO_USBDEV(sc));
- return -ENOMEM;
- }
-
if (UEA_CHIP_VERSION(sc) == ADI930)
load_XILINX_firmware(sc);
@@ -2225,7 +2217,6 @@ err1:
sc->urb_int = NULL;
kfree(intr);
err0:
- destroy_workqueue(sc->work_q);
uea_leaves(INS_TO_USBDEV(sc));
return -ENOMEM;
}
@@ -2246,8 +2237,8 @@ static void uea_stop(struct uea_softc *sc)
kfree(sc->urb_int->transfer_buffer);
usb_free_urb(sc->urb_int);
- /* stop any pending boot process, when no one can schedule work */
- destroy_workqueue(sc->work_q);
+ /* flush the work item, when no one can schedule it */
+ flush_work_sync(&sc->task);
if (sc->dsp_firm)
release_firmware(sc->dsp_firm);
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 2c6965484fe..b0585e623ba 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -10,7 +10,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mm.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/usb.h>
@@ -22,7 +22,7 @@
*/
/* FIXME tune these based on pool statistics ... */
-static const size_t pool_max [HCD_BUFFER_POOLS] = {
+static const size_t pool_max[HCD_BUFFER_POOLS] = {
/* platforms without dma-friendly caches might need to
* prevent cacheline sharing...
*/
@@ -51,7 +51,7 @@ static const size_t pool_max [HCD_BUFFER_POOLS] = {
int hcd_buffer_create(struct usb_hcd *hcd)
{
char name[16];
- int i, size;
+ int i, size;
if (!hcd->self.controller->dma_mask &&
!(hcd->driver->flags & HCD_LOCAL_MEM))
@@ -64,7 +64,7 @@ int hcd_buffer_create(struct usb_hcd *hcd)
snprintf(name, sizeof name, "buffer-%d", size);
hcd->pool[i] = dma_pool_create(name, hcd->self.controller,
size, size, 0);
- if (!hcd->pool [i]) {
+ if (!hcd->pool[i]) {
hcd_buffer_destroy(hcd);
return -ENOMEM;
}
@@ -99,14 +99,14 @@ void hcd_buffer_destroy(struct usb_hcd *hcd)
*/
void *hcd_buffer_alloc(
- struct usb_bus *bus,
+ struct usb_bus *bus,
size_t size,
gfp_t mem_flags,
dma_addr_t *dma
)
{
struct usb_hcd *hcd = bus_to_hcd(bus);
- int i;
+ int i;
/* some USB hosts just use PIO */
if (!bus->controller->dma_mask &&
@@ -116,21 +116,21 @@ void *hcd_buffer_alloc(
}
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
- if (size <= pool_max [i])
- return dma_pool_alloc(hcd->pool [i], mem_flags, dma);
+ if (size <= pool_max[i])
+ return dma_pool_alloc(hcd->pool[i], mem_flags, dma);
}
return dma_alloc_coherent(hcd->self.controller, size, dma, mem_flags);
}
void hcd_buffer_free(
- struct usb_bus *bus,
+ struct usb_bus *bus,
size_t size,
- void *addr,
+ void *addr,
dma_addr_t dma
)
{
struct usb_hcd *hcd = bus_to_hcd(bus);
- int i;
+ int i;
if (!addr)
return;
@@ -142,8 +142,8 @@ void hcd_buffer_free(
}
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
- if (size <= pool_max [i]) {
- dma_pool_free(hcd->pool [i], addr, dma);
+ if (size <= pool_max[i]) {
+ dma_pool_free(hcd->pool[i], addr, dma);
return;
}
}
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index fca61720b87..38072e4e74b 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1659,6 +1659,11 @@ static int usb_runtime_suspend(struct device *dev)
return -EAGAIN;
status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
+ /* The PM core reacts badly unless the return code is 0,
+ * -EAGAIN, or -EBUSY, so always return -EBUSY on an error.
+ */
+ if (status != 0)
+ return -EBUSY;
return status;
}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index d37088591d9..b992a886f05 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -192,13 +192,13 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
"Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
pci_name(dev));
retval = -ENODEV;
- goto err1;
+ goto disable_pci;
}
hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev));
if (!hcd) {
retval = -ENOMEM;
- goto err1;
+ goto disable_pci;
}
if (driver->flags & HCD_MEMORY) {
@@ -209,13 +209,13 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
driver->description)) {
dev_dbg(&dev->dev, "controller already in use\n");
retval = -EBUSY;
- goto err2;
+ goto clear_companion;
}
hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
if (hcd->regs == NULL) {
dev_dbg(&dev->dev, "error mapping memory\n");
retval = -EFAULT;
- goto err3;
+ goto release_mem_region;
}
} else {
@@ -236,7 +236,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (region == PCI_ROM_RESOURCE) {
dev_dbg(&dev->dev, "no i/o regions available\n");
retval = -EBUSY;
- goto err2;
+ goto clear_companion;
}
}
@@ -244,24 +244,24 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
retval = usb_add_hcd(hcd, dev->irq, IRQF_DISABLED | IRQF_SHARED);
if (retval != 0)
- goto err4;
+ goto unmap_registers;
set_hs_companion(dev, hcd);
if (pci_dev_run_wake(dev))
pm_runtime_put_noidle(&dev->dev);
return retval;
- err4:
+unmap_registers:
if (driver->flags & HCD_MEMORY) {
iounmap(hcd->regs);
- err3:
+release_mem_region:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
} else
release_region(hcd->rsrc_start, hcd->rsrc_len);
- err2:
+clear_companion:
clear_hs_companion(dev, hcd);
usb_put_hcd(hcd);
- err1:
+disable_pci:
pci_disable_device(dev);
dev_err(&dev->dev, "init %s fail, %d\n", pci_name(dev), retval);
return retval;
@@ -367,6 +367,13 @@ static int check_root_hub_suspended(struct device *dev)
dev_warn(dev, "Root hub is not suspended\n");
return -EBUSY;
}
+ if (hcd->shared_hcd) {
+ hcd = hcd->shared_hcd;
+ if (HCD_RH_RUNNING(hcd)) {
+ dev_warn(dev, "Secondary root hub is not suspended\n");
+ return -EBUSY;
+ }
+ }
return 0;
}
@@ -391,11 +398,16 @@ static int suspend_common(struct device *dev, bool do_wakeup)
*/
if (do_wakeup && HCD_WAKEUP_PENDING(hcd))
return -EBUSY;
+ if (do_wakeup && hcd->shared_hcd &&
+ HCD_WAKEUP_PENDING(hcd->shared_hcd))
+ return -EBUSY;
retval = hcd->driver->pci_suspend(hcd, do_wakeup);
suspend_report_result(hcd->driver->pci_suspend, retval);
/* Check again in case wakeup raced with pci_suspend */
- if (retval == 0 && do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
+ if ((retval == 0 && do_wakeup && HCD_WAKEUP_PENDING(hcd)) ||
+ (retval == 0 && do_wakeup && hcd->shared_hcd &&
+ HCD_WAKEUP_PENDING(hcd->shared_hcd))) {
if (hcd->driver->pci_resume)
hcd->driver->pci_resume(hcd, false);
retval = -EBUSY;
@@ -426,7 +438,9 @@ static int resume_common(struct device *dev, int event)
struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
int retval;
- if (HCD_RH_RUNNING(hcd)) {
+ if (HCD_RH_RUNNING(hcd) ||
+ (hcd->shared_hcd &&
+ HCD_RH_RUNNING(hcd->shared_hcd))) {
dev_dbg(dev, "can't resume, not suspended!\n");
return 0;
}
@@ -440,6 +454,8 @@ static int resume_common(struct device *dev, int event)
pci_set_master(pci_dev);
clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+ if (hcd->shared_hcd)
+ clear_bit(HCD_FLAG_SAW_IRQ, &hcd->shared_hcd->flags);
if (hcd->driver->pci_resume && !HCD_DEAD(hcd)) {
if (event != PM_EVENT_AUTO_RESUME)
@@ -449,6 +465,8 @@ static int resume_common(struct device *dev, int event)
event == PM_EVENT_RESTORE);
if (retval) {
dev_err(dev, "PCI post-resume error %d!\n", retval);
+ if (hcd->shared_hcd)
+ usb_hc_died(hcd->shared_hcd);
usb_hc_died(hcd);
}
}
@@ -474,8 +492,9 @@ static int hcd_pci_suspend_noirq(struct device *dev)
pci_save_state(pci_dev);
- /* If the root hub is dead rather than suspended,
- * disallow remote wakeup.
+ /* If the root hub is dead rather than suspended, disallow remote
+ * wakeup. usb_hc_died() should ensure that both hosts are marked as
+ * dying, so we only need to check the primary roothub.
*/
if (HCD_DEAD(hcd))
device_set_wakeup_enable(dev, 0);
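The hcd-pci.c changes above teach the PCI suspend/resume paths about a second (shared) host controller hanging off the same PCI device, such as a USB3 roothub paired with a USB2 one: suspend is refused while either roothub is still running or has a wakeup request pending. The sketch below models only that dual check; struct hcd here is a toy stand-in for the kernel's usb_hcd, and the return value -1 stands in for -EBUSY.

/* Build stand-alone: cc -std=c99 shared_hcd_suspend.c */
#include <stdbool.h>
#include <stdio.h>

struct hcd {
	bool rh_running;
	bool wakeup_pending;
	struct hcd *shared_hcd;		/* companion controller, or NULL */
};

static int check_root_hubs_suspended(const struct hcd *hcd)
{
	if (hcd->rh_running)
		return -1;		/* primary roothub still running */
	if (hcd->shared_hcd && hcd->shared_hcd->rh_running)
		return -1;		/* secondary roothub still running */
	return 0;
}

static bool wakeup_pending(const struct hcd *hcd)
{
	return hcd->wakeup_pending ||
	       (hcd->shared_hcd && hcd->shared_hcd->wakeup_pending);
}

int main(void)
{
	struct hcd usb3 = { .rh_running = false };
	struct hcd usb2 = { .rh_running = false, .shared_hcd = &usb3 };

	usb3.shared_hcd = &usb2;
	usb3.wakeup_pending = true;	/* wakeup raced in on the shared HCD */

	printf("roothubs suspended: %d, wakeup pending: %d\n",
	       check_root_hubs_suspended(&usb2), wakeup_pending(&usb2));
	return 0;
}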
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index fd95b9e94bb..bf80670bb61 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -297,7 +297,7 @@ static const u8 ss_rh_config_descriptor[] = {
/* one configuration */
0x09, /* __u8 bLength; */
0x02, /* __u8 bDescriptorType; Configuration */
- 0x19, 0x00, /* __le16 wTotalLength; FIXME */
+ 0x1f, 0x00, /* __le16 wTotalLength; */
0x01, /* __u8 bNumInterfaces; (1) */
0x01, /* __u8 bConfigurationValue; */
0x00, /* __u8 iConfiguration; */
@@ -327,11 +327,14 @@ static const u8 ss_rh_config_descriptor[] = {
/* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8)
* see hub.c:hub_configure() for details. */
(USB_MAXCHILDREN + 1 + 7) / 8, 0x00,
- 0x0c /* __u8 ep_bInterval; (256ms -- usb 2.0 spec) */
- /*
- * All 3.0 hubs should have an endpoint companion descriptor,
- * but we're ignoring that for now. FIXME?
- */
+ 0x0c, /* __u8 ep_bInterval; (256ms -- usb 2.0 spec) */
+
+ /* one SuperSpeed endpoint companion descriptor */
+ 0x06, /* __u8 ss_bLength */
+ 0x30, /* __u8 ss_bDescriptorType; SuperSpeed EP Companion */
+ 0x00, /* __u8 ss_bMaxBurst; allows 1 TX between ACKs */
+ 0x00, /* __u8 ss_bmAttributes; 1 packet per service interval */
+ 0x02, 0x00 /* __le16 ss_wBytesPerInterval; 15 bits for max 15 ports */
};
/*-------------------------------------------------------------------------*/
@@ -504,7 +507,7 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
switch (wValue & 0xff00) {
case USB_DT_DEVICE << 8:
- switch (hcd->driver->flags & HCD_MASK) {
+ switch (hcd->speed) {
case HCD_USB3:
bufp = usb3_rh_dev_descriptor;
break;
@@ -522,7 +525,7 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
patch_protocol = 1;
break;
case USB_DT_CONFIG << 8:
- switch (hcd->driver->flags & HCD_MASK) {
+ switch (hcd->speed) {
case HCD_USB3:
bufp = ss_rh_config_descriptor;
len = sizeof ss_rh_config_descriptor;
@@ -1150,6 +1153,8 @@ int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
dev_warn(hcd->self.controller, "Unlink after no-IRQ? "
"Controller is probably using the wrong IRQ.\n");
set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+ if (hcd->shared_hcd)
+ set_bit(HCD_FLAG_SAW_IRQ, &hcd->shared_hcd->flags);
}
return 0;
@@ -1259,7 +1264,7 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle,
*dma_handle = 0;
}
-void unmap_urb_setup_for_dma(struct usb_hcd *hcd, struct urb *urb)
+void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
if (urb->transfer_flags & URB_SETUP_MAP_SINGLE)
dma_unmap_single(hcd->self.controller,
@@ -1276,13 +1281,21 @@ void unmap_urb_setup_for_dma(struct usb_hcd *hcd, struct urb *urb)
/* Make it safe to call this routine more than once */
urb->transfer_flags &= ~(URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL);
}
-EXPORT_SYMBOL_GPL(unmap_urb_setup_for_dma);
+EXPORT_SYMBOL_GPL(usb_hcd_unmap_urb_setup_for_dma);
+
+static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ if (hcd->driver->unmap_urb_for_dma)
+ hcd->driver->unmap_urb_for_dma(hcd, urb);
+ else
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+}
-void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+void usb_hcd_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
enum dma_data_direction dir;
- unmap_urb_setup_for_dma(hcd, urb);
+ usb_hcd_unmap_urb_setup_for_dma(hcd, urb);
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (urb->transfer_flags & URB_DMA_MAP_SG)
@@ -1311,11 +1324,20 @@ void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
urb->transfer_flags &= ~(URB_DMA_MAP_SG | URB_DMA_MAP_PAGE |
URB_DMA_MAP_SINGLE | URB_MAP_LOCAL);
}
-EXPORT_SYMBOL_GPL(unmap_urb_for_dma);
+EXPORT_SYMBOL_GPL(usb_hcd_unmap_urb_for_dma);
static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
+ if (hcd->driver->map_urb_for_dma)
+ return hcd->driver->map_urb_for_dma(hcd, urb, mem_flags);
+ else
+ return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+}
+
+int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
enum dma_data_direction dir;
int ret = 0;
@@ -1407,10 +1429,11 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
}
if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE |
URB_SETUP_MAP_LOCAL)))
- unmap_urb_for_dma(hcd, urb);
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
}
return ret;
}
+EXPORT_SYMBOL_GPL(usb_hcd_map_urb_for_dma);
/*-------------------------------------------------------------------------*/
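The hunks above let a host controller driver supply its own URB DMA mapping through two new hc_driver callbacks, while map_urb_for_dma()/unmap_urb_for_dma() fall back to the exported usb_hcd_* helpers when the callbacks are absent. A minimal sketch, not part of this patch, of how a hypothetical foo-hcd driver might use the hooks:

	static int foo_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				       gfp_t mem_flags)
	{
		/* No special DMA constraints: reuse the generic mapping. */
		return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
	}

	static void foo_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
	{
		usb_hcd_unmap_urb_for_dma(hcd, urb);
	}

	static const struct hc_driver foo_hc_driver = {
		.description		= "foo-hcd",
		/* ... remaining mandatory hc_driver fields elided ... */
		.map_urb_for_dma	= foo_map_urb_for_dma,
		.unmap_urb_for_dma	= foo_unmap_urb_for_dma,
	};

Drivers that leave both members NULL keep the previous behaviour unchanged.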
@@ -2103,6 +2126,8 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
rc = IRQ_NONE;
} else {
set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+ if (hcd->shared_hcd)
+ set_bit(HCD_FLAG_SAW_IRQ, &hcd->shared_hcd->flags);
if (unlikely(hcd->state == HC_STATE_HALT))
usb_hc_died(hcd);
@@ -2122,7 +2147,9 @@ EXPORT_SYMBOL_GPL(usb_hcd_irq);
*
* This is called by bus glue to report a USB host controller that died
* while operations may still have been pending. It's called automatically
- * by the PCI glue, so only glue for non-PCI busses should need to call it.
+ * by the PCI glue, so only glue for non-PCI busses should need to call it.
+ *
+ * Only call this function with the primary HCD.
*/
void usb_hc_died (struct usb_hcd *hcd)
{
@@ -2141,17 +2168,31 @@ void usb_hc_died (struct usb_hcd *hcd)
USB_STATE_NOTATTACHED);
usb_kick_khubd (hcd->self.root_hub);
}
+ if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
+ hcd = hcd->shared_hcd;
+ if (hcd->rh_registered) {
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
+ /* make khubd clean up old urbs and devices */
+ usb_set_device_state(hcd->self.root_hub,
+ USB_STATE_NOTATTACHED);
+ usb_kick_khubd(hcd->self.root_hub);
+ }
+ }
spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
+ /* Make sure that the other roothub is also deallocated. */
}
EXPORT_SYMBOL_GPL (usb_hc_died);
/*-------------------------------------------------------------------------*/
/**
- * usb_create_hcd - create and initialize an HCD structure
+ * usb_create_shared_hcd - create and initialize an HCD structure
* @driver: HC driver that will use this hcd
* @dev: device for this HC, stored in hcd->self.controller
* @bus_name: value to store in hcd->self.bus_name
+ * @primary_hcd: a pointer to the usb_hcd structure that is sharing the
+ * PCI device. Only allocate certain resources for the primary HCD
* Context: !in_interrupt()
*
* Allocate a struct usb_hcd, with extra space at the end for the
@@ -2160,8 +2201,9 @@ EXPORT_SYMBOL_GPL (usb_hc_died);
*
* If memory is unavailable, returns NULL.
*/
-struct usb_hcd *usb_create_hcd (const struct hc_driver *driver,
- struct device *dev, const char *bus_name)
+struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
+ struct device *dev, const char *bus_name,
+ struct usb_hcd *primary_hcd)
{
struct usb_hcd *hcd;
@@ -2170,7 +2212,24 @@ struct usb_hcd *usb_create_hcd (const struct hc_driver *driver,
dev_dbg (dev, "hcd alloc failed\n");
return NULL;
}
- dev_set_drvdata(dev, hcd);
+ if (primary_hcd == NULL) {
+ hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
+ GFP_KERNEL);
+ if (!hcd->bandwidth_mutex) {
+ kfree(hcd);
+ dev_dbg(dev, "hcd bandwidth mutex alloc failed\n");
+ return NULL;
+ }
+ mutex_init(hcd->bandwidth_mutex);
+ dev_set_drvdata(dev, hcd);
+ } else {
+ hcd->bandwidth_mutex = primary_hcd->bandwidth_mutex;
+ hcd->primary_hcd = primary_hcd;
+ primary_hcd->primary_hcd = primary_hcd;
+ hcd->shared_hcd = primary_hcd;
+ primary_hcd->shared_hcd = hcd;
+ }
+
kref_init(&hcd->kref);
usb_bus_init(&hcd->self);
@@ -2184,19 +2243,53 @@ struct usb_hcd *usb_create_hcd (const struct hc_driver *driver,
#ifdef CONFIG_USB_SUSPEND
INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
#endif
- mutex_init(&hcd->bandwidth_mutex);
hcd->driver = driver;
+ hcd->speed = driver->flags & HCD_MASK;
hcd->product_desc = (driver->product_desc) ? driver->product_desc :
"USB Host Controller";
return hcd;
}
+EXPORT_SYMBOL_GPL(usb_create_shared_hcd);
+
+/**
+ * usb_create_hcd - create and initialize an HCD structure
+ * @driver: HC driver that will use this hcd
+ * @dev: device for this HC, stored in hcd->self.controller
+ * @bus_name: value to store in hcd->self.bus_name
+ * Context: !in_interrupt()
+ *
+ * Allocate a struct usb_hcd, with extra space at the end for the
+ * HC driver's private data. Initialize the generic members of the
+ * hcd structure.
+ *
+ * If memory is unavailable, returns NULL.
+ */
+struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
+ struct device *dev, const char *bus_name)
+{
+ return usb_create_shared_hcd(driver, dev, bus_name, NULL);
+}
EXPORT_SYMBOL_GPL(usb_create_hcd);
+/*
+ * Roothubs that share one PCI device must also share the bandwidth mutex.
+ * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
+ * deallocated.
+ *
+ * Make sure to only deallocate the bandwidth_mutex when the primary HCD is
+ * freed. When hcd_release() is called for the non-primary HCD, set the
+ * primary_hcd's shared_hcd pointer to null (since the non-primary HCD will be
+ * freed shortly).
+ */
static void hcd_release (struct kref *kref)
{
struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
+ if (usb_hcd_is_primary_hcd(hcd))
+ kfree(hcd->bandwidth_mutex);
+ else
+ hcd->shared_hcd->shared_hcd = NULL;
kfree(hcd);
}
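Taken together, usb_create_shared_hcd() and hcd_release() let two roothubs share one device, one IRQ and one bandwidth_mutex. A non-authoritative sketch of the creation order inside a hypothetical probe() routine; foo_hc_driver, pdev and irq are assumptions of this example, not part of the patch:

	struct usb_hcd *hcd, *shared;
	int retval;

	/* Primary HCD: allocates the shared bandwidth_mutex and claims the IRQ. */
	hcd = usb_create_hcd(&foo_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;
	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (retval) {
		usb_put_hcd(hcd);
		return retval;
	}

	/* Second roothub on the same device: reuses the primary's resources. */
	shared = usb_create_shared_hcd(&foo_hc_driver, &pdev->dev,
				       dev_name(&pdev->dev), hcd);
	if (!shared) {
		retval = -ENOMEM;
		goto err_remove_primary;
	}
	retval = usb_add_hcd(shared, irq, IRQF_SHARED);
	if (retval) {
		usb_put_hcd(shared);
		goto err_remove_primary;
	}
	return 0;

	err_remove_primary:
		usb_remove_hcd(hcd);
		usb_put_hcd(hcd);
		return retval;

Teardown would run in the opposite order, removing and putting the non-primary HCD first, so the bandwidth_mutex owned by the primary is freed last.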
@@ -2215,6 +2308,54 @@ void usb_put_hcd (struct usb_hcd *hcd)
}
EXPORT_SYMBOL_GPL(usb_put_hcd);
+int usb_hcd_is_primary_hcd(struct usb_hcd *hcd)
+{
+ if (!hcd->primary_hcd)
+ return 1;
+ return hcd == hcd->primary_hcd;
+}
+EXPORT_SYMBOL_GPL(usb_hcd_is_primary_hcd);
+
+static int usb_hcd_request_irqs(struct usb_hcd *hcd,
+ unsigned int irqnum, unsigned long irqflags)
+{
+ int retval;
+
+ if (hcd->driver->irq) {
+
+ /* IRQF_DISABLED doesn't work as advertised when used together
+ * with IRQF_SHARED. As usb_hcd_irq() will always disable
+ * interrupts we can remove it here.
+ */
+ if (irqflags & IRQF_SHARED)
+ irqflags &= ~IRQF_DISABLED;
+
+ snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
+ hcd->driver->description, hcd->self.busnum);
+ retval = request_irq(irqnum, &usb_hcd_irq, irqflags,
+ hcd->irq_descr, hcd);
+ if (retval != 0) {
+ dev_err(hcd->self.controller,
+ "request interrupt %d failed\n",
+ irqnum);
+ return retval;
+ }
+ hcd->irq = irqnum;
+ dev_info(hcd->self.controller, "irq %d, %s 0x%08llx\n", irqnum,
+ (hcd->driver->flags & HCD_MEMORY) ?
+ "io mem" : "io base",
+ (unsigned long long)hcd->rsrc_start);
+ } else {
+ hcd->irq = -1;
+ if (hcd->rsrc_start)
+ dev_info(hcd->self.controller, "%s 0x%08llx\n",
+ (hcd->driver->flags & HCD_MEMORY) ?
+ "io mem" : "io base",
+ (unsigned long long)hcd->rsrc_start);
+ }
+ return 0;
+}
+
/**
* usb_add_hcd - finish generic HCD structure initialization and register
* @hcd: the usb_hcd structure to initialize
@@ -2255,7 +2396,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
}
hcd->self.root_hub = rhdev;
- switch (hcd->driver->flags & HCD_MASK) {
+ switch (hcd->speed) {
case HCD_USB11:
rhdev->speed = USB_SPEED_FULL;
break;
@@ -2296,38 +2437,15 @@ int usb_add_hcd(struct usb_hcd *hcd,
dev_dbg(hcd->self.controller, "supports USB remote wakeup\n");
/* enable irqs just before we start the controller */
- if (hcd->driver->irq) {
-
- /* IRQF_DISABLED doesn't work as advertised when used together
- * with IRQF_SHARED. As usb_hcd_irq() will always disable
- * interrupts we can remove it here.
- */
- if (irqflags & IRQF_SHARED)
- irqflags &= ~IRQF_DISABLED;
-
- snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
- hcd->driver->description, hcd->self.busnum);
- if ((retval = request_irq(irqnum, &usb_hcd_irq, irqflags,
- hcd->irq_descr, hcd)) != 0) {
- dev_err(hcd->self.controller,
- "request interrupt %d failed\n", irqnum);
+ if (usb_hcd_is_primary_hcd(hcd)) {
+ retval = usb_hcd_request_irqs(hcd, irqnum, irqflags);
+ if (retval)
goto err_request_irq;
- }
- hcd->irq = irqnum;
- dev_info(hcd->self.controller, "irq %d, %s 0x%08llx\n", irqnum,
- (hcd->driver->flags & HCD_MEMORY) ?
- "io mem" : "io base",
- (unsigned long long)hcd->rsrc_start);
- } else {
- hcd->irq = -1;
- if (hcd->rsrc_start)
- dev_info(hcd->self.controller, "%s 0x%08llx\n",
- (hcd->driver->flags & HCD_MEMORY) ?
- "io mem" : "io base",
- (unsigned long long)hcd->rsrc_start);
}
- if ((retval = hcd->driver->start(hcd)) < 0) {
+ hcd->state = HC_STATE_RUNNING;
+ retval = hcd->driver->start(hcd);
+ if (retval < 0) {
dev_err(hcd->self.controller, "startup error %d\n", retval);
goto err_hcd_driver_start;
}
@@ -2370,7 +2488,7 @@ err_register_root_hub:
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);
err_hcd_driver_start:
- if (hcd->irq >= 0)
+ if (usb_hcd_is_primary_hcd(hcd) && hcd->irq >= 0)
free_irq(irqnum, hcd);
err_request_irq:
err_hcd_driver_setup:
@@ -2434,8 +2552,10 @@ void usb_remove_hcd(struct usb_hcd *hcd)
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);
- if (hcd->irq >= 0)
- free_irq(hcd->irq, hcd);
+ if (usb_hcd_is_primary_hcd(hcd)) {
+ if (hcd->irq >= 0)
+ free_irq(hcd->irq, hcd);
+ }
usb_put_dev(hcd->self.root_hub);
usb_deregister_bus(&hcd->self);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0f299b7aad6..500dff4b0ee 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -82,6 +82,10 @@ struct usb_hub {
void **port_owners;
};
+static inline int hub_is_superspeed(struct usb_device *hdev)
+{
+ return (hdev->descriptor.bDeviceProtocol == 3);
+}
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
@@ -151,14 +155,14 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
static int usb_reset_and_verify_device(struct usb_device *udev);
-static inline char *portspeed(int portstatus)
+static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
+ if (hub_is_superspeed(hub->hdev))
+ return "5.0 Gb/s";
if (portstatus & USB_PORT_STAT_HIGH_SPEED)
return "480 Mb/s";
else if (portstatus & USB_PORT_STAT_LOW_SPEED)
return "1.5 Mb/s";
- else if (portstatus & USB_PORT_STAT_SUPER_SPEED)
- return "5.0 Gb/s";
else
return "12 Mb/s";
}
@@ -172,14 +176,23 @@ static struct usb_hub *hdev_to_hub(struct usb_device *hdev)
}
/* USB 2.0 spec Section 11.24.4.5 */
-static int get_hub_descriptor(struct usb_device *hdev, void *data, int size)
+static int get_hub_descriptor(struct usb_device *hdev, void *data)
{
- int i, ret;
+ int i, ret, size;
+ unsigned dtype;
+
+ if (hub_is_superspeed(hdev)) {
+ dtype = USB_DT_SS_HUB;
+ size = USB_DT_SS_HUB_SIZE;
+ } else {
+ dtype = USB_DT_HUB;
+ size = sizeof(struct usb_hub_descriptor);
+ }
for (i = 0; i < 3; i++) {
ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB,
- USB_DT_HUB << 8, 0, data, size,
+ dtype << 8, 0, data, size,
USB_CTRL_GET_TIMEOUT);
if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2))
return ret;
@@ -365,6 +378,16 @@ static int hub_port_status(struct usb_hub *hub, int port1,
} else {
*status = le16_to_cpu(hub->status->port.wPortStatus);
*change = le16_to_cpu(hub->status->port.wPortChange);
+
+ if ((hub->hdev->parent != NULL) &&
+ hub_is_superspeed(hub->hdev)) {
+ /* Translate the USB 3 port status */
+ u16 tmp = *status & USB_SS_PORT_STAT_MASK;
+ if (*status & USB_SS_PORT_STAT_POWER)
+ tmp |= USB_PORT_STAT_POWER;
+ *status = tmp;
+ }
+
ret = 0;
}
mutex_unlock(&hub->status_mutex);
@@ -607,7 +630,7 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
if (hdev->children[port1-1] && set_state)
usb_set_device_state(hdev->children[port1-1],
USB_STATE_NOTATTACHED);
- if (!hub->error)
+ if (!hub->error && !hub_is_superspeed(hub->hdev))
ret = clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
if (ret)
dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
@@ -616,7 +639,7 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
}
/*
- * Disable a port and mark a logical connnect-change event, so that some
+ * Disable a port and mark a logical connect-change event, so that some
* time later khubd will disconnect() any existing usb_device on the port
* and will re-enumerate if there actually is a device attached.
*/
@@ -769,12 +792,8 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
* USB3 protocol ports will automatically transition
* to the Enabled state when a USB 3.0 device attach is detected.
* Do not disable USB3 protocol ports.
- * FIXME: USB3 root hub and external hubs are treated
- * differently here.
*/
- if (hdev->descriptor.bDeviceProtocol != 3 ||
- (!hdev->parent &&
- !(portstatus & USB_PORT_STAT_SUPER_SPEED))) {
+ if (!hub_is_superspeed(hdev)) {
clear_port_feature(hdev, port1,
USB_PORT_FEAT_ENABLE);
portstatus &= ~USB_PORT_STAT_ENABLE;
@@ -795,6 +814,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_ENABLE);
}
+ if (portchange & USB_PORT_STAT_C_LINK_STATE) {
+ need_debounce_delay = true;
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_PORT_LINK_STATE);
+ }
/* We can forget about a "removed" device when there's a
* physical disconnect or the connect status changes.
@@ -964,12 +988,23 @@ static int hub_configure(struct usb_hub *hub,
goto fail;
}
+ if (hub_is_superspeed(hdev) && (hdev->parent != NULL)) {
+ ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
+ HUB_SET_DEPTH, USB_RT_HUB,
+ hdev->level - 1, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+
+ if (ret < 0) {
+ message = "can't set hub depth";
+ goto fail;
+ }
+ }
+
/* Request the entire hub descriptor.
* hub->descriptor can handle USB_MAXCHILDREN ports,
* but the hub can/will return fewer bytes here.
*/
- ret = get_hub_descriptor(hdev, hub->descriptor,
- sizeof(*hub->descriptor));
+ ret = get_hub_descriptor(hdev, hub->descriptor);
if (ret < 0) {
message = "can't read hub descriptor";
goto fail;
@@ -991,12 +1026,14 @@ static int hub_configure(struct usb_hub *hub,
wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics);
- if (wHubCharacteristics & HUB_CHAR_COMPOUND) {
+ /* FIXME for USB 3.0, skip for now */
+ if ((wHubCharacteristics & HUB_CHAR_COMPOUND) &&
+ !(hub_is_superspeed(hdev))) {
int i;
char portstr [USB_MAXCHILDREN + 1];
for (i = 0; i < hdev->maxchild; i++)
- portstr[i] = hub->descriptor->DeviceRemovable
+ portstr[i] = hub->descriptor->u.hs.DeviceRemovable
[((i + 1) / 8)] & (1 << ((i + 1) % 8))
? 'F' : 'R';
portstr[hdev->maxchild] = 0;
@@ -1253,8 +1290,14 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
desc = intf->cur_altsetting;
hdev = interface_to_usbdev(intf);
- /* Hubs have proper suspend/resume support */
- usb_enable_autosuspend(hdev);
+ /* Hubs have proper suspend/resume support. USB 3.0 device suspend is
+ * different from USB 2.0/1.1 device suspend, and unfortunately we
+ * don't support it yet. So leave autosuspend disabled for USB 3.0
+ * external hubs for now. Enable autosuspend for USB 3.0 roothubs,
+ * since that isn't a "real" hub.
+ */
+ if (!hub_is_superspeed(hdev) || !hdev->parent)
+ usb_enable_autosuspend(hdev);
if (hdev->level == MAX_TOPO_LEVEL) {
dev_err(&intf->dev,
@@ -1499,6 +1542,13 @@ void usb_set_device_state(struct usb_device *udev,
EXPORT_SYMBOL_GPL(usb_set_device_state);
/*
+ * Choose a device number.
+ *
+ * Device numbers are used as filenames in usbfs. On USB-1.1 and
+ * USB-2.0 buses they are also used as device addresses, however on
+ * USB-3.0 buses the address is assigned by the controller hardware
+ * and it usually is not the same as the device number.
+ *
* WUSB devices are simple: they have no hubs behind, so the mapping
* device <-> virtual port number becomes 1:1. Why? to simplify the
* life of the device connection logic in
@@ -1520,7 +1570,7 @@ EXPORT_SYMBOL_GPL(usb_set_device_state);
* the HCD must setup data structures before issuing a set address
* command to the hardware.
*/
-static void choose_address(struct usb_device *udev)
+static void choose_devnum(struct usb_device *udev)
{
int devnum;
struct usb_bus *bus = udev->bus;
@@ -1545,7 +1595,7 @@ static void choose_address(struct usb_device *udev)
}
}
-static void release_address(struct usb_device *udev)
+static void release_devnum(struct usb_device *udev)
{
if (udev->devnum > 0) {
clear_bit(udev->devnum, udev->bus->devmap.devicemap);
@@ -1553,7 +1603,7 @@ static void release_address(struct usb_device *udev)
}
}
-static void update_address(struct usb_device *udev, int devnum)
+static void update_devnum(struct usb_device *udev, int devnum)
{
/* The address for a WUSB device is managed by wusbcore. */
if (!udev->wusb)
@@ -1600,7 +1650,8 @@ void usb_disconnect(struct usb_device **pdev)
* this quiesces everyting except pending urbs.
*/
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
- dev_info (&udev->dev, "USB disconnect, address %d\n", udev->devnum);
+ dev_info(&udev->dev, "USB disconnect, device number %d\n",
+ udev->devnum);
usb_lock_device(udev);
@@ -1630,7 +1681,7 @@ void usb_disconnect(struct usb_device **pdev)
/* Free the device number and delete the parent's children[]
* (or root_hub) pointer.
*/
- release_address(udev);
+ release_devnum(udev);
/* Avoid races with recursively_mark_NOTATTACHED() */
spin_lock_irq(&device_state_lock);
@@ -2015,7 +2066,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
(portstatus & USB_PORT_STAT_ENABLE)) {
if (hub_is_wusb(hub))
udev->speed = USB_SPEED_WIRELESS;
- else if (portstatus & USB_PORT_STAT_SUPER_SPEED)
+ else if (hub_is_superspeed(hub->hdev))
udev->speed = USB_SPEED_SUPER;
else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
udev->speed = USB_SPEED_HIGH;
@@ -2071,7 +2122,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
case 0:
/* TRSTRCY = 10 ms; plus some extra */
msleep(10 + 40);
- update_address(udev, 0);
+ update_devnum(udev, 0);
if (hcd->driver->reset_device) {
status = hcd->driver->reset_device(hcd, udev);
if (status < 0) {
@@ -2634,7 +2685,7 @@ static int hub_set_address(struct usb_device *udev, int devnum)
USB_REQ_SET_ADDRESS, 0, devnum, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (retval == 0) {
- update_address(udev, devnum);
+ update_devnum(udev, devnum);
/* Device now using proper address. */
usb_set_device_state(udev, USB_STATE_ADDRESS);
usb_ep0_reinit(udev);
@@ -2739,9 +2790,9 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
}
if (udev->speed != USB_SPEED_SUPER)
dev_info(&udev->dev,
- "%s %s speed %sUSB device using %s and address %d\n",
+ "%s %s speed %sUSB device number %d using %s\n",
(udev->config) ? "reset" : "new", speed, type,
- udev->bus->controller->driver->name, devnum);
+ devnum, udev->bus->controller->driver->name);
/* Set up TT records, if needed */
if (hdev->tt) {
@@ -2771,10 +2822,6 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
* value.
*/
for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
- /*
- * An xHCI controller cannot send any packets to a device until
- * a set address command successfully completes.
- */
if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
struct usb_device_descriptor *buf;
int r = 0;
@@ -2857,9 +2904,9 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
if (udev->speed == USB_SPEED_SUPER) {
devnum = udev->devnum;
dev_info(&udev->dev,
- "%s SuperSpeed USB device using %s and address %d\n",
+ "%s SuperSpeed USB device number %d using %s\n",
(udev->config) ? "reset" : "new",
- udev->bus->controller->driver->name, devnum);
+ devnum, udev->bus->controller->driver->name);
}
/* cope with hardware quirkiness:
@@ -2922,7 +2969,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
fail:
if (retval) {
hub_port_disable(hub, port1, 0);
- update_address(udev, devnum); /* for disconnect processing */
+ update_devnum(udev, devnum); /* for disconnect processing */
}
mutex_unlock(&usb_address0_mutex);
return retval;
@@ -3013,7 +3060,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
dev_dbg (hub_dev,
"port %d, status %04x, change %04x, %s\n",
- port1, portstatus, portchange, portspeed (portstatus));
+ port1, portstatus, portchange, portspeed(hub, portstatus));
if (hub->has_indicators) {
set_port_led(hub, port1, HUB_LED_AUTO);
@@ -3114,32 +3161,13 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
udev->level = hdev->level + 1;
udev->wusb = hub_is_wusb(hub);
- /*
- * USB 3.0 devices are reset automatically before the connect
- * port status change appears, and the root hub port status
- * shows the correct speed. We also get port change
- * notifications for USB 3.0 devices from the USB 3.0 portion of
- * an external USB 3.0 hub, but this isn't handled correctly yet
- * FIXME.
- */
-
- if (!(hcd->driver->flags & HCD_USB3))
- udev->speed = USB_SPEED_UNKNOWN;
- else if ((hdev->parent == NULL) &&
- (portstatus & USB_PORT_STAT_SUPER_SPEED))
+ /* Only USB 3.0 devices are connected to SuperSpeed hubs. */
+ if (hub_is_superspeed(hub->hdev))
udev->speed = USB_SPEED_SUPER;
else
udev->speed = USB_SPEED_UNKNOWN;
- /*
- * Set the address.
- * Note xHCI needs to issue an address device command later
- * in the hub_port_init sequence for SS/HS/FS/LS devices,
- * and xHC will assign an address to the device. But use
- * kernel assigned address here, to avoid any address conflict
- * issue.
- */
- choose_address(udev);
+ choose_devnum(udev);
if (udev->devnum <= 0) {
status = -ENOTCONN; /* Don't retry */
goto loop;
@@ -3231,7 +3259,7 @@ loop_disable:
hub_port_disable(hub, port1, 1);
loop:
usb_ep0_reinit(udev);
- release_address(udev);
+ release_devnum(udev);
hub_free_dev(udev);
usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
@@ -3408,12 +3436,19 @@ static void hub_events(void)
}
if (portchange & USB_PORT_STAT_C_OVERCURRENT) {
- dev_err (hub_dev,
- "over-current change on port %d\n",
- i);
+ u16 status = 0;
+ u16 unused;
+
+ dev_dbg(hub_dev, "over-current change on port "
+ "%d\n", i);
clear_port_feature(hdev, i,
USB_PORT_FEAT_C_OVER_CURRENT);
+ msleep(100); /* Cool down */
hub_power_on(hub, true);
+ hub_port_status(hub, i, &status, &unused);
+ if (status & USB_PORT_STAT_OVERCURRENT)
+ dev_err(hub_dev, "over-current "
+ "condition on port %d\n", i);
}
if (portchange & USB_PORT_STAT_C_RESET) {
@@ -3423,6 +3458,25 @@ static void hub_events(void)
clear_port_feature(hdev, i,
USB_PORT_FEAT_C_RESET);
}
+ if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
+ hub_is_superspeed(hub->hdev)) {
+ dev_dbg(hub_dev,
+ "warm reset change on port %d\n",
+ i);
+ clear_port_feature(hdev, i,
+ USB_PORT_FEAT_C_BH_PORT_RESET);
+ }
+ if (portchange & USB_PORT_STAT_C_LINK_STATE) {
+ clear_port_feature(hub->hdev, i,
+ USB_PORT_FEAT_C_PORT_LINK_STATE);
+ }
+ if (portchange & USB_PORT_STAT_C_CONFIG_ERROR) {
+ dev_warn(hub_dev,
+ "config error on port %d\n",
+ i);
+ clear_port_feature(hub->hdev, i,
+ USB_PORT_FEAT_C_PORT_CONFIG_ERROR);
+ }
if (connect_change)
hub_port_connect_change(hub, i,
@@ -3445,10 +3499,17 @@ static void hub_events(void)
hub->limited_power = 0;
}
if (hubchange & HUB_CHANGE_OVERCURRENT) {
- dev_dbg (hub_dev, "overcurrent change\n");
- msleep(500); /* Cool down */
+ u16 status = 0;
+ u16 unused;
+
+ dev_dbg(hub_dev, "over-current change\n");
clear_hub_feature(hdev, C_HUB_OVER_CURRENT);
+ msleep(500); /* Cool down */
hub_power_on(hub, true);
+ hub_hub_status(hub, &status, &unused);
+ if (status & HUB_STATUS_OVERCURRENT)
+ dev_err(hub_dev, "over-current "
+ "condition\n");
}
}
@@ -3697,13 +3758,13 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
if (!udev->actconfig)
goto done;
- mutex_lock(&hcd->bandwidth_mutex);
+ mutex_lock(hcd->bandwidth_mutex);
ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL);
if (ret < 0) {
dev_warn(&udev->dev,
"Busted HC? Not enough HCD resources for "
"old configuration.\n");
- mutex_unlock(&hcd->bandwidth_mutex);
+ mutex_unlock(hcd->bandwidth_mutex);
goto re_enumerate;
}
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
@@ -3714,10 +3775,10 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
dev_err(&udev->dev,
"can't restore configuration #%d (error=%d)\n",
udev->actconfig->desc.bConfigurationValue, ret);
- mutex_unlock(&hcd->bandwidth_mutex);
+ mutex_unlock(hcd->bandwidth_mutex);
goto re_enumerate;
}
- mutex_unlock(&hcd->bandwidth_mutex);
+ mutex_unlock(hcd->bandwidth_mutex);
usb_set_device_state(udev, USB_STATE_CONFIGURED);
/* Put interfaces back into the same altsettings as before.
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 83248742382..5701e857392 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1284,12 +1284,12 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
/* Make sure we have enough bandwidth for this alternate interface.
* Remove the current alt setting and add the new alt setting.
*/
- mutex_lock(&hcd->bandwidth_mutex);
+ mutex_lock(hcd->bandwidth_mutex);
ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt);
if (ret < 0) {
dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n",
alternate);
- mutex_unlock(&hcd->bandwidth_mutex);
+ mutex_unlock(hcd->bandwidth_mutex);
return ret;
}
@@ -1311,10 +1311,10 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
} else if (ret < 0) {
/* Re-instate the old alt setting */
usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting);
- mutex_unlock(&hcd->bandwidth_mutex);
+ mutex_unlock(hcd->bandwidth_mutex);
return ret;
}
- mutex_unlock(&hcd->bandwidth_mutex);
+ mutex_unlock(hcd->bandwidth_mutex);
/* FIXME drivers shouldn't need to replicate/bugfix the logic here
* when they implement async or easily-killable versions of this or
@@ -1413,7 +1413,7 @@ int usb_reset_configuration(struct usb_device *dev)
config = dev->actconfig;
retval = 0;
- mutex_lock(&hcd->bandwidth_mutex);
+ mutex_lock(hcd->bandwidth_mutex);
/* Make sure we have enough bandwidth for each alternate setting 0 */
for (i = 0; i < config->desc.bNumInterfaces; i++) {
struct usb_interface *intf = config->interface[i];
@@ -1442,7 +1442,7 @@ reset_old_alts:
usb_hcd_alloc_bandwidth(dev, NULL,
alt, intf->cur_altsetting);
}
- mutex_unlock(&hcd->bandwidth_mutex);
+ mutex_unlock(hcd->bandwidth_mutex);
return retval;
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
@@ -1451,7 +1451,7 @@ reset_old_alts:
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (retval < 0)
goto reset_old_alts;
- mutex_unlock(&hcd->bandwidth_mutex);
+ mutex_unlock(hcd->bandwidth_mutex);
/* re-init hc/hcd interface/endpoint state */
for (i = 0; i < config->desc.bNumInterfaces; i++) {
@@ -1739,10 +1739,10 @@ free_interfaces:
* host controller will not allow submissions to dropped endpoints. If
* this call fails, the device state is unchanged.
*/
- mutex_lock(&hcd->bandwidth_mutex);
+ mutex_lock(hcd->bandwidth_mutex);
ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
if (ret < 0) {
- mutex_unlock(&hcd->bandwidth_mutex);
+ mutex_unlock(hcd->bandwidth_mutex);
usb_autosuspend_device(dev);
goto free_interfaces;
}
@@ -1761,11 +1761,11 @@ free_interfaces:
if (!cp) {
usb_set_device_state(dev, USB_STATE_ADDRESS);
usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
- mutex_unlock(&hcd->bandwidth_mutex);
+ mutex_unlock(hcd->bandwidth_mutex);
usb_autosuspend_device(dev);
goto free_interfaces;
}
- mutex_unlock(&hcd->bandwidth_mutex);
+ mutex_unlock(hcd->bandwidth_mutex);
usb_set_device_state(dev, USB_STATE_CONFIGURED);
/* Initialize the new interface structures and the
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index b975450f403..a9cf484ecae 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -122,6 +122,19 @@ static inline int is_usb_device_driver(struct device_driver *drv)
for_devices;
}
+/* translate USB error codes to codes user space understands */
+static inline int usb_translate_errors(int error_code)
+{
+ switch (error_code) {
+ case 0:
+ case -ENOMEM:
+ case -ENODEV:
+ return error_code;
+ default:
+ return -EIO;
+ }
+}
+
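A short illustration, not taken from this patch, of how a caller inside usbcore might use the new helper so that only 0, -ENOMEM, -ENODEV or -EIO ever reach user space; foo_do_submit() is a stand-in for any internal operation:

	static int foo_submit_for_userspace(struct usb_device *udev)
	{
		int rc = foo_do_submit(udev);	/* may fail with e.g. -EPIPE */

		/* Collapse internal codes into ones user space understands. */
		return usb_translate_errors(rc);
	}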
/* for labeling diagnostics */
extern const char *usbcore_name;
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 94ecdbc758c..0bc06e2bcfc 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -601,7 +601,7 @@ try_again:
dbgp_printk("dbgp_bulk_write failed: %d\n", ret);
goto err;
}
- dbgp_printk("small write doned\n");
+ dbgp_printk("small write done\n");
dbgp_not_safe = 0;
return 0;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index d50099675f2..bfde50e20b3 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -176,6 +176,18 @@ config USB_FSL_USB2
default USB_GADGET
select USB_GADGET_SELECTED
+config USB_GADGET_FUSB300
+ boolean "Faraday FUSB300 USB Peripheral Controller"
+ select USB_GADGET_DUALSPEED
+ help
+ Faraday USB device controller FUSB300 driver
+
+config USB_FUSB300
+ tristate
+ depends on USB_GADGET_FUSB300
+ default USB_GADGET
+ select USB_GADGET_SELECTED
+
config USB_GADGET_LH7A40X
boolean "LH7A40X"
depends on ARCH_LH7A40X
@@ -540,7 +552,7 @@ config USB_GADGET_CI13XXX_MSM
boolean "MIPS USB CI13xxx for MSM"
depends on ARCH_MSM
select USB_GADGET_DUALSPEED
- select USB_MSM_OTG_72K
+ select USB_MSM_OTG
help
MSM SoC has chipidea USB controller. This driver uses
ci13xxx_udc core.
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 55f5e8ae592..305286e181d 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_USB_EG20T) += pch_udc.o
obj-$(CONFIG_USB_PXA_U2O) += mv_udc.o
mv_udc-y := mv_udc_core.o mv_udc_phy.o
obj-$(CONFIG_USB_CI13XXX_MSM) += ci13xxx_msm.o
+obj-$(CONFIG_USB_FUSB300) += fusb300_udc.o
#
# USB gadget drivers
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index bdec36acd0f..bb8ddf0469f 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1798,8 +1798,10 @@ static int __init at91udc_probe(struct platform_device *pdev)
}
retval = device_register(&udc->gadget.dev);
- if (retval < 0)
+ if (retval < 0) {
+ put_device(&udc->gadget.dev);
goto fail0b;
+ }
/* don't do anything until we have both gadget driver and VBUS */
clk_enable(udc->iclk);
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index a1c67ae1572..e09178bc145 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -435,20 +435,6 @@ static int hw_ep_get_halt(int num, int dir)
}
/**
- * hw_ep_is_primed: test if endpoint is primed (execute without interruption)
- * @num: endpoint number
- * @dir: endpoint direction
- *
- * This function returns true if endpoint primed
- */
-static int hw_ep_is_primed(int num, int dir)
-{
- u32 reg = hw_cread(CAP_ENDPTPRIME, ~0) | hw_cread(CAP_ENDPTSTAT, ~0);
-
- return test_bit(hw_ep_bit(num, dir), (void *)&reg);
-}
-
-/**
* hw_test_and_clear_setup_status: test & clear setup status (execute without
* interruption)
* @n: bit number (endpoint)
@@ -472,10 +458,6 @@ static int hw_ep_prime(int num, int dir, int is_ctrl)
{
int n = hw_ep_bit(num, dir);
- /* the caller should flush first */
- if (hw_ep_is_primed(num, dir))
- return -EBUSY;
-
if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
return -EAGAIN;
@@ -1434,6 +1416,8 @@ static inline u8 _usb_addr(struct ci13xxx_ep *ep)
static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
{
unsigned i;
+ int ret = 0;
+ unsigned length = mReq->req.length;
trace("%p, %p", mEp, mReq);
@@ -1441,53 +1425,91 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
if (mReq->req.status == -EALREADY)
return -EALREADY;
- if (hw_ep_is_primed(mEp->num, mEp->dir))
- return -EBUSY;
-
mReq->req.status = -EALREADY;
-
- if (mReq->req.length && !mReq->req.dma) {
+ if (length && !mReq->req.dma) {
mReq->req.dma = \
dma_map_single(mEp->device, mReq->req.buf,
- mReq->req.length, mEp->dir ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ length, mEp->dir ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
if (mReq->req.dma == 0)
return -ENOMEM;
mReq->map = 1;
}
+ if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
+ mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
+ &mReq->zdma);
+ if (mReq->zptr == NULL) {
+ if (mReq->map) {
+ dma_unmap_single(mEp->device, mReq->req.dma,
+ length, mEp->dir ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ mReq->req.dma = 0;
+ mReq->map = 0;
+ }
+ return -ENOMEM;
+ }
+ memset(mReq->zptr, 0, sizeof(*mReq->zptr));
+ mReq->zptr->next = TD_TERMINATE;
+ mReq->zptr->token = TD_STATUS_ACTIVE;
+ if (!mReq->req.no_interrupt)
+ mReq->zptr->token |= TD_IOC;
+ }
/*
* TD configuration
* TODO - handle requests which spawns into several TDs
*/
memset(mReq->ptr, 0, sizeof(*mReq->ptr));
- mReq->ptr->next |= TD_TERMINATE;
- mReq->ptr->token = mReq->req.length << ffs_nr(TD_TOTAL_BYTES);
+ mReq->ptr->token = length << ffs_nr(TD_TOTAL_BYTES);
mReq->ptr->token &= TD_TOTAL_BYTES;
- mReq->ptr->token |= TD_IOC;
mReq->ptr->token |= TD_STATUS_ACTIVE;
+ if (mReq->zptr) {
+ mReq->ptr->next = mReq->zdma;
+ } else {
+ mReq->ptr->next = TD_TERMINATE;
+ if (!mReq->req.no_interrupt)
+ mReq->ptr->token |= TD_IOC;
+ }
mReq->ptr->page[0] = mReq->req.dma;
for (i = 1; i < 5; i++)
mReq->ptr->page[i] =
(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
- /*
- * QH configuration
- * At this point it's guaranteed exclusive access to qhead
- * (endpt is not primed) so it's no need to use tripwire
- */
+ if (!list_empty(&mEp->qh.queue)) {
+ struct ci13xxx_req *mReqPrev;
+ int n = hw_ep_bit(mEp->num, mEp->dir);
+ int tmp_stat;
+
+ mReqPrev = list_entry(mEp->qh.queue.prev,
+ struct ci13xxx_req, queue);
+ if (mReqPrev->zptr)
+ mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
+ else
+ mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
+ wmb();
+ if (hw_cread(CAP_ENDPTPRIME, BIT(n)))
+ goto done;
+ do {
+ hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
+ tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n));
+ } while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW));
+ hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0);
+ if (tmp_stat)
+ goto done;
+ }
+
+ /* QH configuration */
mEp->qh.ptr->td.next = mReq->dma; /* TERMINATE = 0 */
mEp->qh.ptr->td.token &= ~TD_STATUS; /* clear status */
- if (mReq->req.zero == 0)
- mEp->qh.ptr->cap |= QH_ZLT;
- else
- mEp->qh.ptr->cap &= ~QH_ZLT;
+ mEp->qh.ptr->cap |= QH_ZLT;
wmb(); /* synchronize before ep prime */
- return hw_ep_prime(mEp->num, mEp->dir,
+ ret = hw_ep_prime(mEp->num, mEp->dir,
mEp->type == USB_ENDPOINT_XFER_CONTROL);
+done:
+ return ret;
}
/**
@@ -1504,8 +1526,15 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
if (mReq->req.status != -EALREADY)
return -EINVAL;
- if (hw_ep_is_primed(mEp->num, mEp->dir))
- hw_ep_flush(mEp->num, mEp->dir);
+ if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
+ return -EBUSY;
+
+ if (mReq->zptr) {
+ if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
+ return -EBUSY;
+ dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
+ mReq->zptr = NULL;
+ }
mReq->req.status = 0;
@@ -1517,9 +1546,7 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
}
mReq->req.status = mReq->ptr->token & TD_STATUS;
- if ((TD_STATUS_ACTIVE & mReq->req.status) != 0)
- mReq->req.status = -ECONNRESET;
- else if ((TD_STATUS_HALTED & mReq->req.status) != 0)
+ if ((TD_STATUS_HALTED & mReq->req.status) != 0)
mReq->req.status = -1;
else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
mReq->req.status = -1;
@@ -1581,12 +1608,19 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
{
struct usb_ep *ep;
struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
+ unsigned long flags;
trace("%p", gadget);
if (gadget == NULL)
return -EINVAL;
+ spin_lock_irqsave(udc->lock, flags);
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->remote_wakeup = 0;
+ udc->suspended = 0;
+ spin_unlock_irqrestore(udc->lock, flags);
+
/* flush all endpoints */
gadget_for_each_ep(ep, gadget) {
usb_ep_fifo_flush(ep);
@@ -1720,7 +1754,8 @@ __acquires(mEp->lock)
}
if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
- /* TODO: D1 - Remote Wakeup; D0 - Self Powered */
+ /* Assume that device is bus powered for now. */
+ *((u16 *)req->buf) = _udc->remote_wakeup << 1;
retval = 0;
} else if ((setup->bRequestType & USB_RECIP_MASK) \
== USB_RECIP_ENDPOINT) {
@@ -1749,6 +1784,28 @@ __acquires(mEp->lock)
}
/**
+ * isr_setup_status_complete: setup_status request complete function
+ * @ep: endpoint
+ * @req: request handled
+ *
+ * Caller must release lock. Put the port in test mode if test mode
+ * feature is selected.
+ */
+static void
+isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct ci13xxx *udc = req->context;
+ unsigned long flags;
+
+ trace("%p, %p", ep, req);
+
+ spin_lock_irqsave(udc->lock, flags);
+ if (udc->test_mode)
+ hw_port_test_set(udc->test_mode);
+ spin_unlock_irqrestore(udc->lock, flags);
+}
+
+/**
* isr_setup_status_phase: queues the status phase of a setup transaction
* @udc: udc struct
*
@@ -1764,6 +1821,8 @@ __acquires(mEp->lock)
trace("%p", udc);
mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
+ udc->status->context = udc;
+ udc->status->complete = isr_setup_status_complete;
spin_unlock(mEp->lock);
retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
@@ -1783,7 +1842,7 @@ static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
{
- struct ci13xxx_req *mReq;
+ struct ci13xxx_req *mReq, *mReqTemp;
int retval;
trace("%p", mEp);
@@ -1791,34 +1850,25 @@ __acquires(mEp->lock)
if (list_empty(&mEp->qh.queue))
return -EINVAL;
- /* pop oldest request */
- mReq = list_entry(mEp->qh.queue.next,
- struct ci13xxx_req, queue);
- list_del_init(&mReq->queue);
-
- retval = _hardware_dequeue(mEp, mReq);
- if (retval < 0) {
- dbg_event(_usb_addr(mEp), "DONE", retval);
- goto done;
- }
-
- dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
-
- if (!list_empty(&mEp->qh.queue)) {
- struct ci13xxx_req* mReqEnq;
-
- mReqEnq = list_entry(mEp->qh.queue.next,
- struct ci13xxx_req, queue);
- _hardware_enqueue(mEp, mReqEnq);
+ list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
+ queue) {
+ retval = _hardware_dequeue(mEp, mReq);
+ if (retval < 0)
+ break;
+ list_del_init(&mReq->queue);
+ dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
+ if (mReq->req.complete != NULL) {
+ spin_unlock(mEp->lock);
+ mReq->req.complete(&mEp->ep, &mReq->req);
+ spin_lock(mEp->lock);
+ }
}
- if (mReq->req.complete != NULL) {
- spin_unlock(mEp->lock);
- mReq->req.complete(&mEp->ep, &mReq->req);
- spin_lock(mEp->lock);
- }
+ if (retval == EBUSY)
+ retval = 0;
+ if (retval < 0)
+ dbg_event(_usb_addr(mEp), "DONE", retval);
- done:
return retval;
}
@@ -1833,6 +1883,7 @@ __releases(udc->lock)
__acquires(udc->lock)
{
unsigned i;
+ u8 tmode = 0;
trace("%p", udc);
@@ -1895,22 +1946,32 @@ __acquires(udc->lock)
switch (req.bRequest) {
case USB_REQ_CLEAR_FEATURE:
- if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
- le16_to_cpu(req.wValue) != USB_ENDPOINT_HALT)
- goto delegate;
- if (req.wLength != 0)
- break;
- num = le16_to_cpu(req.wIndex);
- num &= USB_ENDPOINT_NUMBER_MASK;
- if (!udc->ci13xxx_ep[num].wedge) {
- spin_unlock(udc->lock);
- err = usb_ep_clear_halt(
- &udc->ci13xxx_ep[num].ep);
- spin_lock(udc->lock);
- if (err)
+ if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+ le16_to_cpu(req.wValue) ==
+ USB_ENDPOINT_HALT) {
+ if (req.wLength != 0)
break;
+ num = le16_to_cpu(req.wIndex);
+ num &= USB_ENDPOINT_NUMBER_MASK;
+ if (!udc->ci13xxx_ep[num].wedge) {
+ spin_unlock(udc->lock);
+ err = usb_ep_clear_halt(
+ &udc->ci13xxx_ep[num].ep);
+ spin_lock(udc->lock);
+ if (err)
+ break;
+ }
+ err = isr_setup_status_phase(udc);
+ } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
+ le16_to_cpu(req.wValue) ==
+ USB_DEVICE_REMOTE_WAKEUP) {
+ if (req.wLength != 0)
+ break;
+ udc->remote_wakeup = 0;
+ err = isr_setup_status_phase(udc);
+ } else {
+ goto delegate;
}
- err = isr_setup_status_phase(udc);
break;
case USB_REQ_GET_STATUS:
if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
@@ -1934,20 +1995,48 @@ __acquires(udc->lock)
err = isr_setup_status_phase(udc);
break;
case USB_REQ_SET_FEATURE:
- if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
- le16_to_cpu(req.wValue) != USB_ENDPOINT_HALT)
- goto delegate;
- if (req.wLength != 0)
- break;
- num = le16_to_cpu(req.wIndex);
- num &= USB_ENDPOINT_NUMBER_MASK;
+ if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+ le16_to_cpu(req.wValue) ==
+ USB_ENDPOINT_HALT) {
+ if (req.wLength != 0)
+ break;
+ num = le16_to_cpu(req.wIndex);
+ num &= USB_ENDPOINT_NUMBER_MASK;
- spin_unlock(udc->lock);
- err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
- spin_lock(udc->lock);
- if (err)
- break;
- err = isr_setup_status_phase(udc);
+ spin_unlock(udc->lock);
+ err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
+ spin_lock(udc->lock);
+ if (!err)
+ isr_setup_status_phase(udc);
+ } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
+ if (req.wLength != 0)
+ break;
+ switch (le16_to_cpu(req.wValue)) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ udc->remote_wakeup = 1;
+ err = isr_setup_status_phase(udc);
+ break;
+ case USB_DEVICE_TEST_MODE:
+ tmode = le16_to_cpu(req.wIndex) >> 8;
+ switch (tmode) {
+ case TEST_J:
+ case TEST_K:
+ case TEST_SE0_NAK:
+ case TEST_PACKET:
+ case TEST_FORCE_EN:
+ udc->test_mode = tmode;
+ err = isr_setup_status_phase(
+ udc);
+ break;
+ default:
+ break;
+ }
+ default:
+ goto delegate;
+ }
+ } else {
+ goto delegate;
+ }
break;
default:
delegate:
@@ -2178,15 +2267,15 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
/* push request */
mReq->req.status = -EINPROGRESS;
mReq->req.actual = 0;
- list_add_tail(&mReq->queue, &mEp->qh.queue);
- if (list_is_singular(&mEp->qh.queue))
- retval = _hardware_enqueue(mEp, mReq);
+ retval = _hardware_enqueue(mEp, mReq);
if (retval == -EALREADY) {
dbg_event(_usb_addr(mEp), "QUEUE", retval);
retval = 0;
}
+ if (!retval)
+ list_add_tail(&mReq->queue, &mEp->qh.queue);
done:
spin_unlock_irqrestore(mEp->lock, flags);
@@ -2206,19 +2295,25 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
trace("%p, %p", ep, req);
- if (ep == NULL || req == NULL || mEp->desc == NULL ||
- list_empty(&mReq->queue) || list_empty(&mEp->qh.queue))
+ if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
+ mEp->desc == NULL || list_empty(&mReq->queue) ||
+ list_empty(&mEp->qh.queue))
return -EINVAL;
spin_lock_irqsave(mEp->lock, flags);
dbg_event(_usb_addr(mEp), "DEQUEUE", 0);
- if (mReq->req.status == -EALREADY)
- _hardware_dequeue(mEp, mReq);
+ hw_ep_flush(mEp->num, mEp->dir);
/* pop request */
list_del_init(&mReq->queue);
+ if (mReq->map) {
+ dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
+ mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ mReq->req.dma = 0;
+ mReq->map = 0;
+ }
req->status = -ECONNRESET;
if (mReq->req.complete != NULL) {
@@ -2377,6 +2472,31 @@ static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
return 0;
}
+static int ci13xxx_wakeup(struct usb_gadget *_gadget)
+{
+ struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+ unsigned long flags;
+ int ret = 0;
+
+ trace();
+
+ spin_lock_irqsave(udc->lock, flags);
+ if (!udc->remote_wakeup) {
+ ret = -EOPNOTSUPP;
+ dbg_trace("remote wakeup feature is not enabled\n");
+ goto out;
+ }
+ if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
+ ret = -EINVAL;
+ dbg_trace("port is not suspended\n");
+ goto out;
+ }
+ hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
+out:
+ spin_unlock_irqrestore(udc->lock, flags);
+ return ret;
+}
+
/**
* Device operations part of the API to the USB controller hardware,
* which don't involve endpoints (or i/o)
@@ -2384,6 +2504,7 @@ static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
*/
static const struct usb_gadget_ops usb_gadget_ops = {
.vbus_session = ci13xxx_vbus_session,
+ .wakeup = ci13xxx_wakeup,
};
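With .wakeup wired into usb_gadget_ops, a function driver can now request resume signalling through the standard gadget API once the host has suspended the bus and enabled the remote-wakeup feature. A hedged sketch; foo_try_remote_wakeup() is hypothetical and only illustrates the error codes returned by ci13xxx_wakeup() above:

	static void foo_try_remote_wakeup(struct usb_gadget *gadget)
	{
		/* Reaches ci13xxx_wakeup() through the ->wakeup op added above. */
		int err = usb_gadget_wakeup(gadget);

		if (err == -EOPNOTSUPP)
			pr_debug("host did not enable remote wakeup\n");
		else if (err == -EINVAL)
			pr_debug("port is not suspended\n");
	}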
/**
@@ -2626,6 +2747,12 @@ static irqreturn_t udc_irq(void)
isr_statistics.pci++;
udc->gadget.speed = hw_port_is_high_speed() ?
USB_SPEED_HIGH : USB_SPEED_FULL;
+ if (udc->suspended) {
+ spin_unlock(udc->lock);
+ udc->driver->resume(&udc->gadget);
+ spin_lock(udc->lock);
+ udc->suspended = 0;
+ }
}
if (USBi_UEI & intr)
isr_statistics.uei++;
@@ -2633,8 +2760,15 @@ static irqreturn_t udc_irq(void)
isr_statistics.ui++;
isr_tr_complete_handler(udc);
}
- if (USBi_SLI & intr)
+ if (USBi_SLI & intr) {
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+ udc->suspended = 1;
+ spin_unlock(udc->lock);
+ udc->driver->suspend(&udc->gadget);
+ spin_lock(udc->lock);
+ }
isr_statistics.sli++;
+ }
retval = IRQ_HANDLED;
} else {
isr_statistics.none++;
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index a2492b65f98..23707775cb4 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -33,6 +33,7 @@ struct ci13xxx_td {
/* 0 */
u32 next;
#define TD_TERMINATE BIT(0)
+#define TD_ADDR_MASK (0xFFFFFFEUL << 5)
/* 1 */
u32 token;
#define TD_STATUS (0x00FFUL << 0)
@@ -74,6 +75,8 @@ struct ci13xxx_req {
struct list_head queue;
struct ci13xxx_td *ptr;
dma_addr_t dma;
+ struct ci13xxx_td *zptr;
+ dma_addr_t zdma;
};
/* Extension of usb_ep */
@@ -125,6 +128,10 @@ struct ci13xxx {
u32 ep0_dir; /* ep0 direction */
#define ep0out ci13xxx_ep[0]
#define ep0in ci13xxx_ep[16]
+ u8 remote_wakeup; /* Is remote wakeup feature
+ enabled by the host? */
+ u8 suspended; /* suspended by the host */
+ u8 test_mode; /* the selected test mode */
struct usb_gadget_driver *driver; /* 3rd party gadget driver */
struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
@@ -152,6 +159,7 @@ struct ci13xxx {
#define USBCMD_RS BIT(0)
#define USBCMD_RST BIT(1)
#define USBCMD_SUTW BIT(13)
+#define USBCMD_ATDTW BIT(14)
/* USBSTS & USBINTR */
#define USBi_UI BIT(0)
@@ -165,6 +173,7 @@ struct ci13xxx {
#define DEVICEADDR_USBADR (0x7FUL << 25)
/* PORTSC */
+#define PORTSC_FPR BIT(6)
#define PORTSC_SUSP BIT(7)
#define PORTSC_HSP BIT(9)
#define PORTSC_PTC (0x0FUL << 16)
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 1ba4befe336..c2251c40a20 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -813,7 +813,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
*/
req->zero = 0;
req->complete = composite_setup_complete;
- req->length = USB_BUFSIZ;
+ req->length = 0;
gadget->ep0->driver_data = cdev;
switch (ctrl->bRequest) {
@@ -887,7 +887,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
case USB_REQ_SET_INTERFACE:
if (ctrl->bRequestType != USB_RECIP_INTERFACE)
goto unknown;
- if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES)
+ if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
break;
f = cdev->config->interface[intf];
if (!f)
@@ -899,7 +899,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
case USB_REQ_GET_INTERFACE:
if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
goto unknown;
- if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES)
+ if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
break;
f = cdev->config->interface[intf];
if (!f)
@@ -928,7 +928,7 @@ unknown:
*/
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_INTERFACE:
- if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES)
+ if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
break;
f = cdev->config->interface[intf];
break;
@@ -1258,16 +1258,16 @@ static struct usb_gadget_driver composite_driver = {
* while it was binding. That would usually be done in order to wait for
* some userspace participation.
*/
-extern int usb_composite_probe(struct usb_composite_driver *driver,
+int usb_composite_probe(struct usb_composite_driver *driver,
int (*bind)(struct usb_composite_dev *cdev))
{
if (!driver || !driver->dev || !bind || composite)
return -EINVAL;
- if (!driver->iProduct)
- driver->iProduct = driver->name;
if (!driver->name)
driver->name = "composite";
+ if (!driver->iProduct)
+ driver->iProduct = driver->name;
composite_driver.function = (char *) driver->name;
composite_driver.driver.name = driver->name;
composite = driver;
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 13b9f47feec..3214ca375d6 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -1593,8 +1593,8 @@ hub_descriptor (struct usb_hub_descriptor *desc)
desc->bDescLength = 9;
desc->wHubCharacteristics = cpu_to_le16(0x0001);
desc->bNbrPorts = 1;
- desc->bitmap [0] = 0xff;
- desc->bitmap [1] = 0xff;
+ desc->u.hs.DeviceRemovable[0] = 0xff;
+ desc->u.hs.DeviceRemovable[1] = 0xff;
}
static int dummy_hub_control (
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 8a832488ccd..9b7360ff5aa 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -128,6 +128,13 @@ ep_matches (
}
}
+ /*
+ * If the protocol driver hasn't yet decided on wMaxPacketSize
+ * and wants to know the maximum possible, provide the info.
+ */
+ if (desc->wMaxPacketSize == 0)
+ desc->wMaxPacketSize = cpu_to_le16(ep->maxpacket);
+
/* endpoint maxpacket size is an input parameter, except for bulk
* where it's an output parameter representing the full speed limit.
* the usb spec fixes high speed bulk maxpacket at 512 bytes.
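One consequence of the ep_matches() change above, sketched here with a hypothetical interrupt endpoint descriptor: a gadget function may leave wMaxPacketSize at zero and read back the endpoint's maximum after autoconfiguration.

	static struct usb_endpoint_descriptor foo_fs_int_in_desc = {
		.bLength	  = USB_DT_ENDPOINT_SIZE,
		.bDescriptorType  = USB_DT_ENDPOINT,
		.bEndpointAddress = USB_DIR_IN,
		.bmAttributes	  = USB_ENDPOINT_XFER_INT,
		.wMaxPacketSize	  = 0,	/* filled in by ep_matches() */
		.bInterval	  = 10,
	};

	/* in the function's bind() path: */
	ep = usb_ep_autoconfig(gadget, &foo_fs_int_in_desc);
	if (ep)
		pr_debug("%s: maxpacket %u\n", ep->name,
			 le16_to_cpu(foo_fs_int_in_desc.wMaxPacketSize));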
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 1499f9e4afa..19fffccc370 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -368,6 +368,14 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
req->buf = data;
req->length = len;
+ /*
+ * The UDC layer requires a buffer to be provided even for a ZLP, but
+ * should not use it at all. Provide a poisoned pointer to catch a
+ * possible bug in the driver.
+ */
+ if (req->buf == NULL)
+ req->buf = (void *)0xDEADBABE;
+
INIT_COMPLETION(ffs->ep0req_completion);
ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c
index 77b1eb57702..43a49ecc1f3 100644
--- a/drivers/usb/gadget/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/fsl_mxc_udc.c
@@ -88,15 +88,18 @@ eenahb:
void fsl_udc_clk_finalize(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
-#if defined(CONFIG_ARCH_MX35)
- unsigned int v;
-
- /* workaround ENGcm09152 for i.MX35 */
- if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
- v = readl(MX35_IO_ADDRESS(MX35_USB_BASE_ADDR +
- USBPHYCTRL_OTGBASE_OFFSET));
- writel(v | USBPHYCTRL_EVDO, MX35_IO_ADDRESS(MX35_USB_BASE_ADDR +
- USBPHYCTRL_OTGBASE_OFFSET));
+#if defined(CONFIG_SOC_IMX35)
+ if (cpu_is_mx35()) {
+ unsigned int v;
+
+ /* workaround ENGcm09152 for i.MX35 */
+ if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
+ v = readl(MX35_IO_ADDRESS(MX35_USB_BASE_ADDR +
+ USBPHYCTRL_OTGBASE_OFFSET));
+ writel(v | USBPHYCTRL_EVDO,
+ MX35_IO_ADDRESS(MX35_USB_BASE_ADDR +
+ USBPHYCTRL_OTGBASE_OFFSET));
+ }
}
#endif
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 4c55eda4bd2..912cb8e63fe 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -766,7 +766,6 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
struct fsl_req *req = container_of(_req, struct fsl_req, req);
struct fsl_udc *udc;
unsigned long flags;
- int is_iso = 0;
/* catch various bogus parameters */
if (!_req || !req->req.complete || !req->req.buf
@@ -781,7 +780,6 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
if (req->req.length > ep->ep.maxpacket)
return -EMSGSIZE;
- is_iso = 1;
}
udc = ep->udc;
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
new file mode 100644
index 00000000000..763d462454b
--- /dev/null
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -0,0 +1,1744 @@
+/*
+ * Fusb300 UDC (USB gadget)
+ *
+ * Copyright (C) 2010 Faraday Technology Corp.
+ *
+ * Author : Yuan-hsin Chen <yhchen@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "fusb300_udc.h"
+
+MODULE_DESCRIPTION("FUSB300 USB gadget driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yuan Hsin Chen <yhchen@faraday-tech.com>");
+MODULE_ALIAS("platform:fusb300_udc");
+
+#define DRIVER_VERSION "20 October 2010"
+
+static const char udc_name[] = "fusb300_udc";
+static const char * const fusb300_ep_name[] = {
+ "ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7", "ep8", "ep9",
+ "ep10", "ep11", "ep12", "ep13", "ep14", "ep15"
+};
+
+static void done(struct fusb300_ep *ep, struct fusb300_request *req,
+ int status);
+
+static void fusb300_enable_bit(struct fusb300 *fusb300, u32 offset,
+ u32 value)
+{
+ u32 reg = ioread32(fusb300->reg + offset);
+
+ reg |= value;
+ iowrite32(reg, fusb300->reg + offset);
+}
+
+static void fusb300_disable_bit(struct fusb300 *fusb300, u32 offset,
+ u32 value)
+{
+ u32 reg = ioread32(fusb300->reg + offset);
+
+ reg &= ~value;
+ iowrite32(reg, fusb300->reg + offset);
+}
+
+
+static void fusb300_ep_setting(struct fusb300_ep *ep,
+ struct fusb300_ep_info info)
+{
+ ep->epnum = info.epnum;
+ ep->type = info.type;
+}
+
+static int fusb300_ep_release(struct fusb300_ep *ep)
+{
+ if (!ep->epnum)
+ return 0;
+ ep->epnum = 0;
+ ep->stall = 0;
+ ep->wedged = 0;
+ return 0;
+}
+
+static void fusb300_set_fifo_entry(struct fusb300 *fusb300,
+ u32 ep)
+{
+ u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+
+ val &= ~FUSB300_EPSET1_FIFOENTRY_MSK;
+ val |= FUSB300_EPSET1_FIFOENTRY(FUSB300_FIFO_ENTRY_NUM);
+ iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+}
+
+static void fusb300_set_start_entry(struct fusb300 *fusb300,
+ u8 ep)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+ u32 start_entry = fusb300->fifo_entry_num * FUSB300_FIFO_ENTRY_NUM;
+
+ reg &= ~FUSB300_EPSET1_START_ENTRY_MSK;
+ reg |= FUSB300_EPSET1_START_ENTRY(start_entry);
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+ if (fusb300->fifo_entry_num == FUSB300_MAX_FIFO_ENTRY) {
+ fusb300->fifo_entry_num = 0;
+ fusb300->addrofs = 0;
+ pr_err("fifo entry is over the maximum number!\n");
+ } else
+ fusb300->fifo_entry_num++;
+}
+
+/* call fusb300_set_start_entry first, then fusb300_set_epaddrofs */
+static void fusb300_set_epaddrofs(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
+
+ reg &= ~FUSB300_EPSET2_ADDROFS_MSK;
+ reg |= FUSB300_EPSET2_ADDROFS(fusb300->addrofs);
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
+ fusb300->addrofs += (info.maxpacket + 7) / 8 * FUSB300_FIFO_ENTRY_NUM;
+}
+
+static void ep_fifo_setting(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ fusb300_set_fifo_entry(fusb300, info.epnum);
+ fusb300_set_start_entry(fusb300, info.epnum);
+ fusb300_set_epaddrofs(fusb300, info);
+}
+
+static void fusb300_set_eptype(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+
+ reg &= ~FUSB300_EPSET1_TYPE_MSK;
+ reg |= FUSB300_EPSET1_TYPE(info.type);
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+}
+
+static void fusb300_set_epdir(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ u32 reg;
+
+ if (!info.dir_in)
+ return;
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+ reg &= ~FUSB300_EPSET1_DIR_MSK;
+ reg |= FUSB300_EPSET1_DIRIN;
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+}
+
+static void fusb300_set_ep_active(struct fusb300 *fusb300,
+ u8 ep)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+
+ reg |= FUSB300_EPSET1_ACTEN;
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+}
+
+static void fusb300_set_epmps(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
+
+ reg &= ~FUSB300_EPSET2_MPS_MSK;
+ reg |= FUSB300_EPSET2_MPS(info.maxpacket);
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
+}
+
+static void fusb300_set_interval(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+
+ reg &= ~FUSB300_EPSET1_INTERVAL(0x7);
+ reg |= FUSB300_EPSET1_INTERVAL(info.interval);
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+}
+
+static void fusb300_set_bwnum(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+
+ reg &= ~FUSB300_EPSET1_BWNUM(0x3);
+ reg |= FUSB300_EPSET1_BWNUM(info.bw_num);
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+}
+
+static void set_ep_reg(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ fusb300_set_eptype(fusb300, info);
+ fusb300_set_epdir(fusb300, info);
+ fusb300_set_epmps(fusb300, info);
+
+ if (info.interval)
+ fusb300_set_interval(fusb300, info);
+
+ if (info.bw_num)
+ fusb300_set_bwnum(fusb300, info);
+
+ fusb300_set_ep_active(fusb300, info.epnum);
+}
+
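+/*
+ * Translate the endpoint descriptor into a fusb300_ep_info, carve out FIFO
+ * space for the endpoint and program its EPSET registers.
+ */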
+static int config_ep(struct fusb300_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct fusb300 *fusb300 = ep->fusb300;
+ struct fusb300_ep_info info;
+
+ ep->desc = desc;
+
+ info.interval = 0;
+ info.addrofs = 0;
+ info.bw_num = 0;
+
+ info.type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ info.dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
+ info.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+ info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+
+ if ((info.type == USB_ENDPOINT_XFER_INT) ||
+ (info.type == USB_ENDPOINT_XFER_ISOC)) {
+ info.interval = desc->bInterval;
+ if (info.type == USB_ENDPOINT_XFER_ISOC)
+ info.bw_num = (le16_to_cpu(desc->wMaxPacketSize) & 0x1800) >> 11;
+ }
+
+ ep_fifo_setting(fusb300, info);
+
+ set_ep_reg(fusb300, info);
+
+ fusb300_ep_setting(ep, info);
+
+ fusb300->ep[info.epnum] = ep;
+
+ return 0;
+}
+
+static int fusb300_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct fusb300_ep *ep;
+
+ ep = container_of(_ep, struct fusb300_ep, ep);
+
+ if (ep->fusb300->reenum) {
+ ep->fusb300->fifo_entry_num = 0;
+ ep->fusb300->addrofs = 0;
+ ep->fusb300->reenum = 0;
+ }
+
+ return config_ep(ep, desc);
+}
+
+static int fusb300_disable(struct usb_ep *_ep)
+{
+ struct fusb300_ep *ep;
+ struct fusb300_request *req;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct fusb300_ep, ep);
+
+ BUG_ON(!ep);
+
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct fusb300_request, queue);
+ spin_lock_irqsave(&ep->fusb300->lock, flags);
+ done(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&ep->fusb300->lock, flags);
+ }
+
+ return fusb300_ep_release(ep);
+}
+
+static struct usb_request *fusb300_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct fusb300_request *req;
+
+ req = kzalloc(sizeof(struct fusb300_request), gfp_flags);
+ if (!req)
+ return NULL;
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void fusb300_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct fusb300_request *req;
+
+ req = container_of(_req, struct fusb300_request, req);
+ kfree(req);
+}
+
+static int enable_fifo_int(struct fusb300_ep *ep)
+{
+ struct fusb300 *fusb300 = ep->fusb300;
+
+ if (ep->epnum) {
+ fusb300_enable_bit(fusb300, FUSB300_OFFSET_IGER0,
+ FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum));
+ } else {
+ pr_err("can't enable_fifo_int ep0\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int disable_fifo_int(struct fusb300_ep *ep)
+{
+ struct fusb300 *fusb300 = ep->fusb300;
+
+ if (ep->epnum) {
+ fusb300_disable_bit(fusb300, FUSB300_OFFSET_IGER0,
+ FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum));
+ } else {
+ pr_err("can't disable_fifo_int ep0\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void fusb300_set_cxlen(struct fusb300 *fusb300, u32 length)
+{
+ u32 reg;
+
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR);
+ reg &= ~FUSB300_CSR_LEN_MSK;
+ reg |= FUSB300_CSR_LEN(length);
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_CSR);
+}
+
+/* write data to cx fifo */
+static void fusb300_wrcxf(struct fusb300_ep *ep,
+ struct fusb300_request *req)
+{
+ int i = 0;
+ u8 *tmp;
+ u32 data;
+ struct fusb300 *fusb300 = ep->fusb300;
+ u32 length = req->req.length - req->req.actual;
+
+ tmp = req->req.buf + req->req.actual;
+
+ if (length > SS_CTL_MAX_PACKET_SIZE) {
+ fusb300_set_cxlen(fusb300, SS_CTL_MAX_PACKET_SIZE);
+ for (i = (SS_CTL_MAX_PACKET_SIZE >> 2); i > 0; i--) {
+ data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 |
+ *(tmp + 3) << 24;
+ iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+ tmp += 4;
+ }
+ req->req.actual += SS_CTL_MAX_PACKET_SIZE;
+ } else { /* length is less than or equal to the max packet size */
+ fusb300_set_cxlen(fusb300, length);
+ for (i = length >> 2; i > 0; i--) {
+ data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 |
+ *(tmp + 3) << 24;
+ printk(KERN_DEBUG " 0x%x\n", data);
+ iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+ tmp = tmp + 4;
+ }
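+ /* pack the remaining 1-3 bytes into a single 32-bit write */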
+ switch (length % 4) {
+ case 1:
+ data = *tmp;
+ printk(KERN_DEBUG " 0x%x\n", data);
+ iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+ break;
+ case 2:
+ data = *tmp | *(tmp + 1) << 8;
+ printk(KERN_DEBUG " 0x%x\n", data);
+ iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+ break;
+ case 3:
+ data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16;
+ printk(KERN_DEBUG " 0x%x\n", data);
+ iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+ break;
+ default:
+ break;
+ }
+ req->req.actual += length;
+ }
+}
+
+static void fusb300_set_epnstall(struct fusb300 *fusb300, u8 ep)
+{
+ fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep),
+ FUSB300_EPSET0_STL);
+}
+
+static void fusb300_clear_epnstall(struct fusb300 *fusb300, u8 ep)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep));
+
+ if (reg & FUSB300_EPSET0_STL) {
+ printk(KERN_DEBUG "EP%d stall... Clear!!\n", ep);
+ reg &= ~FUSB300_EPSET0_STL;
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET0(ep));
+ }
+}
+
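+/*
+ * Queue a request on ep0: for IN transfers write the data to the CX FIFO
+ * right away; for OUT transfers enable the CX OUT interrupt and read the
+ * data from the interrupt handler (fusb300_ep0out).
+ */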
+static void ep0_queue(struct fusb300_ep *ep, struct fusb300_request *req)
+{
+ if (ep->fusb300->ep0_dir) { /* if IN */
+ if (req->req.length) {
+ fusb300_wrcxf(ep, req);
+ } else
+ printk(KERN_DEBUG "%s : req->req.length = 0x%x\n",
+ __func__, req->req.length);
+ if ((req->req.length == req->req.actual) ||
+ (req->req.actual < ep->ep.maxpacket))
+ done(ep, req, 0);
+ } else { /* OUT */
+ if (!req->req.length)
+ done(ep, req, 0);
+ else
+ fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER1,
+ FUSB300_IGER1_CX_OUT_INT);
+ }
+}
+
+static int fusb300_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct fusb300_ep *ep;
+ struct fusb300_request *req;
+ unsigned long flags;
+ int request = 0;
+
+ ep = container_of(_ep, struct fusb300_ep, ep);
+ req = container_of(_req, struct fusb300_request, req);
+
+ if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&ep->fusb300->lock, flags);
+
+ if (list_empty(&ep->queue))
+ request = 1;
+
+ list_add_tail(&req->queue, &ep->queue);
+
+ req->req.actual = 0;
+ req->req.status = -EINPROGRESS;
+
+ if (ep->desc == NULL) /* ep0 */
+ ep0_queue(ep, req);
+ else if (request && !ep->stall)
+ enable_fifo_int(ep);
+
+ spin_unlock_irqrestore(&ep->fusb300->lock, flags);
+
+ return 0;
+}
+
+static int fusb300_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct fusb300_ep *ep;
+ struct fusb300_request *req;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct fusb300_ep, ep);
+ req = container_of(_req, struct fusb300_request, req);
+
+ spin_lock_irqsave(&ep->fusb300->lock, flags);
+ if (!list_empty(&ep->queue))
+ done(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&ep->fusb300->lock, flags);
+
+ return 0;
+}
+
+static int fusb300_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedge)
+{
+ struct fusb300_ep *ep;
+ struct fusb300 *fusb300;
+ unsigned long flags;
+ int ret = 0;
+
+ ep = container_of(_ep, struct fusb300_ep, ep);
+
+ fusb300 = ep->fusb300;
+
+ spin_lock_irqsave(&ep->fusb300->lock, flags);
+
+ if (!list_empty(&ep->queue)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (value) {
+ fusb300_set_epnstall(fusb300, ep->epnum);
+ ep->stall = 1;
+ if (wedge)
+ ep->wedged = 1;
+ } else {
+ fusb300_clear_epnstall(fusb300, ep->epnum);
+ ep->stall = 0;
+ ep->wedged = 0;
+ }
+
+out:
+ spin_unlock_irqrestore(&ep->fusb300->lock, flags);
+ return ret;
+}
+
+static int fusb300_set_halt(struct usb_ep *_ep, int value)
+{
+ return fusb300_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int fusb300_set_wedge(struct usb_ep *_ep)
+{
+ return fusb300_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static void fusb300_fifo_flush(struct usb_ep *_ep)
+{
+}
+
+static struct usb_ep_ops fusb300_ep_ops = {
+ .enable = fusb300_enable,
+ .disable = fusb300_disable,
+
+ .alloc_request = fusb300_alloc_request,
+ .free_request = fusb300_free_request,
+
+ .queue = fusb300_queue,
+ .dequeue = fusb300_dequeue,
+
+ .set_halt = fusb300_set_halt,
+ .fifo_flush = fusb300_fifo_flush,
+ .set_wedge = fusb300_set_wedge,
+};
+
+/*****************************************************************************/
+static void fusb300_clear_int(struct fusb300 *fusb300, u32 offset,
+ u32 value)
+{
+ iowrite32(value, fusb300->reg + offset);
+}
+
+static void fusb300_reset(void)
+{
+}
+
+static void fusb300_set_cxstall(struct fusb300 *fusb300)
+{
+ fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR,
+ FUSB300_CSR_STL);
+}
+
+static void fusb300_set_cxdone(struct fusb300 *fusb300)
+{
+ fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR,
+ FUSB300_CSR_DONE);
+}
+
+/* read data from cx fifo */
+void fusb300_rdcxf(struct fusb300 *fusb300,
+ u8 *buffer, u32 length)
+{
+ int i = 0;
+ u8 *tmp;
+ u32 data;
+
+ tmp = buffer;
+
+ for (i = (length >> 2); i > 0; i--) {
+ data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
+ printk(KERN_DEBUG " 0x%x\n", data);
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ *(tmp + 2) = (data >> 16) & 0xFF;
+ *(tmp + 3) = (data >> 24) & 0xFF;
+ tmp = tmp + 4;
+ }
+
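+ /* read the trailing 1-3 bytes of the last 32-bit word */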
+ switch (length % 4) {
+ case 1:
+ data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
+ printk(KERN_DEBUG " 0x%x\n", data);
+ *tmp = data & 0xFF;
+ break;
+ case 2:
+ data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
+ printk(KERN_DEBUG " 0x%x\n", data);
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ break;
+ case 3:
+ data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
+ printk(KERN_DEBUG " 0x%x\n", data);
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ *(tmp + 2) = (data >> 16) & 0xFF;
+ break;
+ default:
+ break;
+ }
+}
+
+#if 0
+static void fusb300_dbg_fifo(struct fusb300_ep *ep,
+ u8 entry, u16 length)
+{
+ u32 reg;
+ u32 i = 0;
+ u32 j = 0;
+
+ reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM);
+ reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) |
+ FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG);
+ reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) |
+ FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG);
+ iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM);
+
+ for (i = 0; i < (length >> 2); i++) {
+ if (i * 4 == 1024)
+ break;
+ reg = ioread32(ep->fusb300->reg +
+ FUSB300_OFFSET_BUFDBG_START + i * 4);
+ printk(KERN_DEBUG" 0x%-8x", reg);
+ j++;
+ if ((j % 4) == 0)
+ printk(KERN_DEBUG "\n");
+ }
+
+ if (length % 4) {
+ reg = ioread32(ep->fusb300->reg +
+ FUSB300_OFFSET_BUFDBG_START + i * 4);
+ printk(KERN_DEBUG " 0x%x\n", reg);
+ }
+
+ if ((j % 4) != 0)
+ printk(KERN_DEBUG "\n");
+
+ fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM,
+ FUSB300_GTM_TST_FIFO_DEG);
+}
+
+static void fusb300_cmp_dbg_fifo(struct fusb300_ep *ep,
+ u8 entry, u16 length, u8 *golden)
+{
+ u32 reg;
+ u32 i = 0;
+ u32 golden_value;
+ u8 *tmp;
+
+ tmp = golden;
+
+ printk(KERN_DEBUG "fusb300_cmp_dbg_fifo (entry %d) : start\n", entry);
+
+ reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM);
+ reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) |
+ FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG);
+ reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) |
+ FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG);
+ iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM);
+
+ for (i = 0; i < (length >> 2); i++) {
+ if (i * 4 == 1024)
+ break;
+ golden_value = *tmp | *(tmp + 1) << 8 |
+ *(tmp + 2) << 16 | *(tmp + 3) << 24;
+
+ reg = ioread32(ep->fusb300->reg +
+ FUSB300_OFFSET_BUFDBG_START + i*4);
+
+ if (reg != golden_value) {
+ printk(KERN_DEBUG "0x%x : ", (u32)(ep->fusb300->reg +
+ FUSB300_OFFSET_BUFDBG_START + i*4));
+ printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n",
+ golden_value, reg);
+ }
+ tmp += 4;
+ }
+
+ if (length % 4) {
+ switch (length % 4) {
+ case 1:
+ golden_value = *tmp;
+ break;
+ case 2:
+ golden_value = *tmp | *(tmp + 1) << 8;
+ break;
+ case 3:
+ golden_value = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16;
+ break;
+ }
+
+ reg = ioread32(ep->fusb300->reg +
+ FUSB300_OFFSET_BUFDBG_START + i*4);
+ if (reg != golden_value) {
+ printk(KERN_DEBUG "0x%x:", (u32)(ep->fusb300->reg +
+ FUSB300_OFFSET_BUFDBG_START + i*4));
+ printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n",
+ golden_value, reg);
+ }
+ }
+
+ printk(KERN_DEBUG "fusb300_cmp_dbg_fifo : end\n");
+ fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM,
+ FUSB300_GTM_TST_FIFO_DEG);
+}
+#endif
+
+static void fusb300_rdfifo(struct fusb300_ep *ep,
+ struct fusb300_request *req,
+ u32 length)
+{
+ int i = 0;
+ u8 *tmp;
+ u32 data, reg;
+ struct fusb300 *fusb300 = ep->fusb300;
+
+ tmp = req->req.buf + req->req.actual;
+ req->req.actual += length;
+
+ if (req->req.actual > req->req.length)
+ printk(KERN_DEBUG "req->req.actual > req->req.length\n");
+
+ for (i = (length >> 2); i > 0; i--) {
+ data = ioread32(fusb300->reg +
+ FUSB300_OFFSET_EPPORT(ep->epnum));
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ *(tmp + 2) = (data >> 16) & 0xFF;
+ *(tmp + 3) = (data >> 24) & 0xFF;
+ tmp = tmp + 4;
+ }
+
+ switch (length % 4) {
+ case 1:
+ data = ioread32(fusb300->reg +
+ FUSB300_OFFSET_EPPORT(ep->epnum));
+ *tmp = data & 0xFF;
+ break;
+ case 2:
+ data = ioread32(fusb300->reg +
+ FUSB300_OFFSET_EPPORT(ep->epnum));
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ break;
+ case 3:
+ data = ioread32(fusb300->reg +
+ FUSB300_OFFSET_EPPORT(ep->epnum));
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ *(tmp + 2) = (data >> 16) & 0xFF;
+ break;
+ default:
+ break;
+ }
+
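+ /* wait for the sync FIFO to drain before returning */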
+ do {
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1);
+ reg &= FUSB300_IGR1_SYNF0_EMPTY_INT;
+ if (i)
+ printk(KERN_INFO "sync fifo is not empty!\n");
+ i++;
+ } while (!reg);
+}
+
+/* write data to fifo */
+static void fusb300_wrfifo(struct fusb300_ep *ep,
+ struct fusb300_request *req)
+{
+ int i = 0;
+ u8 *tmp;
+ u32 data, reg;
+ struct fusb300 *fusb300 = ep->fusb300;
+
+ tmp = req->req.buf;
+ req->req.actual = req->req.length;
+
+ for (i = (req->req.length >> 2); i > 0; i--) {
+ data = *tmp | *(tmp + 1) << 8 |
+ *(tmp + 2) << 16 | *(tmp + 3) << 24;
+
+ iowrite32(data, fusb300->reg +
+ FUSB300_OFFSET_EPPORT(ep->epnum));
+ tmp += 4;
+ }
+
+ switch (req->req.length % 4) {
+ case 1:
+ data = *tmp;
+ iowrite32(data, fusb300->reg +
+ FUSB300_OFFSET_EPPORT(ep->epnum));
+ break;
+ case 2:
+ data = *tmp | *(tmp + 1) << 8;
+ iowrite32(data, fusb300->reg +
+ FUSB300_OFFSET_EPPORT(ep->epnum));
+ break;
+ case 3:
+ data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16;
+ iowrite32(data, fusb300->reg +
+ FUSB300_OFFSET_EPPORT(ep->epnum));
+ break;
+ default:
+ break;
+ }
+
+ do {
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1);
+ reg &= FUSB300_IGR1_SYNF0_EMPTY_INT;
+ if (i)
+ printk(KERN_INFO"sync fifo is not empty!\n");
+ i++;
+ } while (!reg);
+}
+
+static u8 fusb300_get_epnstall(struct fusb300 *fusb300, u8 ep)
+{
+ u8 value;
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep));
+
+ value = reg & FUSB300_EPSET0_STL;
+
+ return value;
+}
+
+static u8 fusb300_get_cxstall(struct fusb300 *fusb300)
+{
+ u8 value;
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR);
+
+ value = (reg & FUSB300_CSR_STL) >> 1;
+
+ return value;
+}
+
+static void request_error(struct fusb300 *fusb300)
+{
+ fusb300_set_cxstall(fusb300);
+ printk(KERN_DEBUG "request error!!\n");
+}
+
+static void get_status(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+__releases(fusb300->lock)
+__acquires(fusb300->lock)
+{
+ u8 ep;
+ u16 status = 0;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ status = 1 << USB_DEVICE_SELF_POWERED;
+ break;
+ case USB_RECIP_INTERFACE:
+ status = 0;
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = w_index & USB_ENDPOINT_NUMBER_MASK;
+ if (ep) {
+ if (fusb300_get_epnstall(fusb300, ep))
+ status = 1 << USB_ENDPOINT_HALT;
+ } else {
+ if (fusb300_get_cxstall(fusb300))
+ status = 0;
+ }
+ break;
+
+ default:
+ request_error(fusb300);
+ return; /* exit */
+ }
+
+ fusb300->ep0_data = cpu_to_le16(status);
+ fusb300->ep0_req->buf = &fusb300->ep0_data;
+ fusb300->ep0_req->length = 2;
+
+ spin_unlock(&fusb300->lock);
+ fusb300_queue(fusb300->gadget.ep0, fusb300->ep0_req, GFP_KERNEL);
+ spin_lock(&fusb300->lock);
+}
+
+static void set_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+{
+ u8 ep;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ fusb300_set_cxdone(fusb300);
+ break;
+ case USB_RECIP_INTERFACE:
+ fusb300_set_cxdone(fusb300);
+ break;
+ case USB_RECIP_ENDPOINT: {
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+ ep = w_index & USB_ENDPOINT_NUMBER_MASK;
+ if (ep)
+ fusb300_set_epnstall(fusb300, ep);
+ else
+ fusb300_set_cxstall(fusb300);
+ fusb300_set_cxdone(fusb300);
+ }
+ break;
+ default:
+ request_error(fusb300);
+ break;
+ }
+}
+
+static void fusb300_clear_seqnum(struct fusb300 *fusb300, u8 ep)
+{
+ fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep),
+ FUSB300_EPSET0_CLRSEQNUM);
+}
+
+static void clear_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+{
+ struct fusb300_ep *ep =
+ fusb300->ep[le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK];
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ fusb300_set_cxdone(fusb300);
+ break;
+ case USB_RECIP_INTERFACE:
+ fusb300_set_cxdone(fusb300);
+ break;
+ case USB_RECIP_ENDPOINT:
+ if (le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK) {
+ if (ep->wedged) {
+ fusb300_set_cxdone(fusb300);
+ break;
+ }
+ if (ep->stall) {
+ ep->stall = 0;
+ fusb300_clear_seqnum(fusb300, ep->epnum);
+ fusb300_clear_epnstall(fusb300, ep->epnum);
+ if (!list_empty(&ep->queue))
+ enable_fifo_int(ep);
+ }
+ }
+ fusb300_set_cxdone(fusb300);
+ break;
+ default:
+ request_error(fusb300);
+ break;
+ }
+}
+
+static void fusb300_set_dev_addr(struct fusb300 *fusb300, u16 addr)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_DAR);
+
+ reg &= ~FUSB300_DAR_DRVADDR_MSK;
+ reg |= FUSB300_DAR_DRVADDR(addr);
+
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_DAR);
+}
+
+static void set_address(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+{
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+
+ if (w_value >= 0x0100)
+ request_error(fusb300);
+ else {
+ fusb300_set_dev_addr(fusb300, w_value);
+ fusb300_set_cxdone(fusb300);
+ }
+}
+
+#define UVC_COPY_DESCRIPTORS(mem, src) \
+ do { \
+ const struct usb_descriptor_header * const *__src; \
+ for (__src = src; *__src; ++__src) { \
+ memcpy(mem, *__src, (*__src)->bLength); \
+ mem += (*__src)->bLength; \
+ } \
+ } while (0)
+
+static void fusb300_ep0_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+}
+
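+/*
+ * Read the 8-byte SETUP packet from the CX FIFO.  Standard requests handled
+ * entirely by this driver return 0; everything else returns 1 so the caller
+ * forwards the request to the gadget driver's ->setup().
+ */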
+static int setup_packet(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+{
+ u8 *p = (u8 *)ctrl;
+ u8 ret = 0;
+ u8 i = 0;
+
+ fusb300_rdcxf(fusb300, p, 8);
+ fusb300->ep0_dir = ctrl->bRequestType & USB_DIR_IN;
+ fusb300->ep0_length = le16_to_cpu(ctrl->wLength);
+
+ /* check request */
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ get_status(fusb300, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ clear_feature(fusb300, ctrl);
+ break;
+ case USB_REQ_SET_FEATURE:
+ set_feature(fusb300, ctrl);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ set_address(fusb300, ctrl);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ fusb300_enable_bit(fusb300, FUSB300_OFFSET_DAR,
+ FUSB300_DAR_SETCONFG);
+ /* clear sequence number */
+ for (i = 1; i <= FUSB300_MAX_NUM_EP; i++)
+ fusb300_clear_seqnum(fusb300, i);
+ fusb300->reenum = 1;
+ ret = 1;
+ break;
+ default:
+ ret = 1;
+ break;
+ }
+ } else
+ ret = 1;
+
+ return ret;
+}
+
+static void fusb300_set_ep_bycnt(struct fusb300_ep *ep, u32 bycnt)
+{
+ struct fusb300 *fusb300 = ep->fusb300;
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum));
+
+ reg &= ~FUSB300_FFR_BYCNT;
+ reg |= bycnt & FUSB300_FFR_BYCNT;
+
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum));
+}
+
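+/*
+ * Complete a request: remove it from the endpoint queue, call the gadget's
+ * completion callback with the lock dropped, then either re-arm the FIFO
+ * interrupt (if more requests are queued) or signal DONE on ep0.
+ */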
+static void done(struct fusb300_ep *ep, struct fusb300_request *req,
+ int status)
+{
+ list_del_init(&req->queue);
+
+ /* don't modify queue heads during completion callback */
+ if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN)
+ req->req.status = -ESHUTDOWN;
+ else
+ req->req.status = status;
+
+ spin_unlock(&ep->fusb300->lock);
+ req->req.complete(&ep->ep, &req->req);
+ spin_lock(&ep->fusb300->lock);
+
+ if (ep->epnum) {
+ disable_fifo_int(ep);
+ if (!list_empty(&ep->queue))
+ enable_fifo_int(ep);
+ } else
+ fusb300_set_cxdone(ep->fusb300);
+}
+
+void fusb300_fill_idma_prdtbl(struct fusb300_ep *ep,
+ struct fusb300_request *req)
+{
+ u32 value;
+ u32 reg;
+
+ /* wait for software ownership of the PRD (FUSB300_EPPRD0_H cleared) */
+ do {
+ reg = ioread32(ep->fusb300->reg +
+ FUSB300_OFFSET_EPPRD_W0(ep->epnum));
+ reg &= FUSB300_EPPRD0_H;
+ } while (reg);
+
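+ /*
+ * PRD word 1 holds the buffer address, word 0 the byte count plus the
+ * H/F/L/I control flags, and word 2 is written as zero.
+ */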
+ iowrite32((u32) req->req.buf, ep->fusb300->reg +
+ FUSB300_OFFSET_EPPRD_W1(ep->epnum));
+
+ value = FUSB300_EPPRD0_BTC(req->req.length) | FUSB300_EPPRD0_H |
+ FUSB300_EPPRD0_F | FUSB300_EPPRD0_L | FUSB300_EPPRD0_I;
+ iowrite32(value, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W0(ep->epnum));
+
+ iowrite32(0x0, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W2(ep->epnum));
+
+ fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_EPPRDRDY,
+ FUSB300_EPPRDR_EP_PRD_RDY(ep->epnum));
+}
+
+static void fusb300_wait_idma_finished(struct fusb300_ep *ep)
+{
+ u32 reg;
+
+ do {
+ reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR1);
+ if ((reg & FUSB300_IGR1_VBUS_CHG_INT) ||
+ (reg & FUSB300_IGR1_WARM_RST_INT) ||
+ (reg & FUSB300_IGR1_HOT_RST_INT) ||
+ (reg & FUSB300_IGR1_USBRST_INT)
+ )
+ goto IDMA_RESET;
+ reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR0);
+ reg &= FUSB300_IGR0_EPn_PRD_INT(ep->epnum);
+ } while (!reg);
+
+ fusb300_clear_int(ep->fusb300, FUSB300_OFFSET_IGR0,
+ FUSB300_IGR0_EPn_PRD_INT(ep->epnum));
+IDMA_RESET:
+ fusb300_clear_int(ep->fusb300, FUSB300_OFFSET_IGER0,
+ FUSB300_IGER0_EEPn_PRD_INT(ep->epnum));
+}
+
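+/*
+ * IN transfers go through the internal DMA engine: map the request buffer,
+ * hand it to the PRD table, busy-wait for the PRD interrupt, then unmap.
+ */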
+static void fusb300_set_idma(struct fusb300_ep *ep,
+ struct fusb300_request *req)
+{
+ dma_addr_t d;
+ u8 *tmp = NULL;
+
+ d = dma_map_single(NULL, req->req.buf, req->req.length, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(NULL, d)) {
+ kfree(req->req.buf);
+ printk(KERN_DEBUG "dma_mapping_error\n");
+ return;
+ }
+
+ dma_sync_single_for_device(NULL, d, req->req.length, DMA_TO_DEVICE);
+
+ fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER0,
+ FUSB300_IGER0_EEPn_PRD_INT(ep->epnum));
+
+ tmp = req->req.buf;
+ req->req.buf = (u8 *)d;
+
+ fusb300_fill_idma_prdtbl(ep, req);
+ /* wait for the IDMA transfer to finish */
+ fusb300_wait_idma_finished(ep);
+
+ req->req.buf = tmp;
+
+ if (d)
+ dma_unmap_single(NULL, d, req->req.length, DMA_TO_DEVICE);
+}
+
+static void in_ep_fifo_handler(struct fusb300_ep *ep)
+{
+ struct fusb300_request *req = list_entry(ep->queue.next,
+ struct fusb300_request, queue);
+
+ if (req->req.length) {
+#if 0
+ fusb300_set_ep_bycnt(ep, req->req.length);
+ fusb300_wrfifo(ep, req);
+#else
+ fusb300_set_idma(ep, req);
+#endif
+ }
+ done(ep, req, 0);
+}
+
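+/*
+ * OUT FIFO interrupt: read the received bytes (BYCNT) out of the endpoint
+ * FIFO and complete the request on a full or short transfer.
+ */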
+static void out_ep_fifo_handler(struct fusb300_ep *ep)
+{
+ struct fusb300 *fusb300 = ep->fusb300;
+ struct fusb300_request *req = list_entry(ep->queue.next,
+ struct fusb300_request, queue);
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum));
+ u32 length = reg & FUSB300_FFR_BYCNT;
+
+ fusb300_rdfifo(ep, req, length);
+
+ /* finish out transfer */
+ if ((req->req.length == req->req.actual) || (length < ep->ep.maxpacket))
+ done(ep, req, 0);
+}
+
+static void check_device_mode(struct fusb300 *fusb300)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_GCR);
+
+ switch (reg & FUSB300_GCR_DEVEN_MSK) {
+ case FUSB300_GCR_DEVEN_SS:
+ fusb300->gadget.speed = USB_SPEED_SUPER;
+ break;
+ case FUSB300_GCR_DEVEN_HS:
+ fusb300->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case FUSB300_GCR_DEVEN_FS:
+ fusb300->gadget.speed = USB_SPEED_FULL;
+ break;
+ default:
+ fusb300->gadget.speed = USB_SPEED_UNKNOWN;
+ break;
+ }
+ printk(KERN_INFO "dev_mode = %d\n", (reg & FUSB300_GCR_DEVEN_MSK));
+}
+
+static void fusb300_ep0out(struct fusb300 *fusb300)
+{
+ struct fusb300_ep *ep = fusb300->ep[0];
+ u32 reg;
+
+ if (!list_empty(&ep->queue)) {
+ struct fusb300_request *req;
+
+ req = list_first_entry(&ep->queue,
+ struct fusb300_request, queue);
+ if (req->req.length)
+ fusb300_rdcxf(ep->fusb300, req->req.buf,
+ req->req.length);
+ done(ep, req, 0);
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1);
+ reg &= ~FUSB300_IGER1_CX_OUT_INT;
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_IGER1);
+ } else
+ pr_err("%s : empty queue\n", __func__);
+}
+
+static void fusb300_ep0in(struct fusb300 *fusb300)
+{
+ struct fusb300_request *req;
+ struct fusb300_ep *ep = fusb300->ep[0];
+
+ if ((!list_empty(&ep->queue)) && (fusb300->ep0_dir)) {
+ req = list_entry(ep->queue.next,
+ struct fusb300_request, queue);
+ if (req->req.length)
+ fusb300_wrcxf(ep, req);
+ if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
+ done(ep, req, 0);
+ } else
+ fusb300_set_cxdone(fusb300);
+}
+
+static void fusb300_grp2_handler(void)
+{
+}
+
+static void fusb300_grp3_handler(void)
+{
+}
+
+static void fusb300_grp4_handler(void)
+{
+}
+
+static void fusb300_grp5_handler(void)
+{
+}
+
+static irqreturn_t fusb300_irq(int irq, void *_fusb300)
+{
+ struct fusb300 *fusb300 = _fusb300;
+ u32 int_grp1 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1);
+ u32 int_grp1_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1);
+ u32 int_grp0 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR0);
+ u32 int_grp0_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER0);
+ struct usb_ctrlrequest ctrl;
+ u8 in;
+ u32 reg;
+ int i;
+
+ spin_lock(&fusb300->lock);
+
+ int_grp1 &= int_grp1_en;
+ int_grp0 &= int_grp0_en;
+
+ if (int_grp1 & FUSB300_IGR1_WARM_RST_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_WARM_RST_INT);
+ printk(KERN_INFO"fusb300_warmreset\n");
+ fusb300_reset();
+ }
+
+ if (int_grp1 & FUSB300_IGR1_HOT_RST_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_HOT_RST_INT);
+ printk(KERN_INFO"fusb300_hotreset\n");
+ fusb300_reset();
+ }
+
+ if (int_grp1 & FUSB300_IGR1_USBRST_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_USBRST_INT);
+ fusb300_reset();
+ }
+ /* COMABT_INT has the highest priority */
+
+ if (int_grp1 & FUSB300_IGR1_CX_COMABT_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_CX_COMABT_INT);
+ printk(KERN_INFO"fusb300_ep0abt\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_VBUS_CHG_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_VBUS_CHG_INT);
+ printk(KERN_INFO"fusb300_vbus_change\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U3_EXIT_FAIL_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U3_EXIT_FAIL_INT);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U2_EXIT_FAIL_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U2_EXIT_FAIL_INT);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U1_EXIT_FAIL_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U1_EXIT_FAIL_INT);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U2_ENTRY_FAIL_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U2_ENTRY_FAIL_INT);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U1_ENTRY_FAIL_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U1_ENTRY_FAIL_INT);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U3_EXIT_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U3_EXIT_INT);
+ printk(KERN_INFO "FUSB300_IGR1_U3_EXIT_INT\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U2_EXIT_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U2_EXIT_INT);
+ printk(KERN_INFO "FUSB300_IGR1_U2_EXIT_INT\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U1_EXIT_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U1_EXIT_INT);
+ printk(KERN_INFO "FUSB300_IGR1_U1_EXIT_INT\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U3_ENTRY_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U3_ENTRY_INT);
+ printk(KERN_INFO "FUSB300_IGR1_U3_ENTRY_INT\n");
+ fusb300_enable_bit(fusb300, FUSB300_OFFSET_SSCR1,
+ FUSB300_SSCR1_GO_U3_DONE);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U2_ENTRY_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U2_ENTRY_INT);
+ printk(KERN_INFO "FUSB300_IGR1_U2_ENTRY_INT\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U1_ENTRY_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U1_ENTRY_INT);
+ printk(KERN_INFO "FUSB300_IGR1_U1_ENTRY_INT\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_RESM_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_RESM_INT);
+ printk(KERN_INFO "fusb300_resume\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_SUSP_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_SUSP_INT);
+ printk(KERN_INFO "fusb300_suspend\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_HS_LPM_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_HS_LPM_INT);
+ printk(KERN_INFO "fusb300_HS_LPM_INT\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_DEV_MODE_CHG_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_DEV_MODE_CHG_INT);
+ check_device_mode(fusb300);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_CX_COMFAIL_INT) {
+ fusb300_set_cxstall(fusb300);
+ printk(KERN_INFO "fusb300_ep0fail\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_CX_SETUP_INT) {
+ printk(KERN_INFO "fusb300_ep0setup\n");
+ if (setup_packet(fusb300, &ctrl)) {
+ spin_unlock(&fusb300->lock);
+ if (fusb300->driver->setup(&fusb300->gadget, &ctrl) < 0)
+ fusb300_set_cxstall(fusb300);
+ spin_lock(&fusb300->lock);
+ }
+ }
+
+ if (int_grp1 & FUSB300_IGR1_CX_CMDEND_INT)
+ printk(KERN_INFO "fusb300_cmdend\n");
+
+ if (int_grp1 & FUSB300_IGR1_CX_OUT_INT) {
+ printk(KERN_INFO "fusb300_cxout\n");
+ fusb300_ep0out(fusb300);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_CX_IN_INT) {
+ printk(KERN_INFO "fusb300_cxin\n");
+ fusb300_ep0in(fusb300);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_INTGRP5)
+ fusb300_grp5_handler();
+
+ if (int_grp1 & FUSB300_IGR1_INTGRP4)
+ fusb300_grp4_handler();
+
+ if (int_grp1 & FUSB300_IGR1_INTGRP3)
+ fusb300_grp3_handler();
+
+ if (int_grp1 & FUSB300_IGR1_INTGRP2)
+ fusb300_grp2_handler();
+
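+ /* group-0 holds the per-endpoint FIFO interrupts; dispatch by direction */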
+ if (int_grp0) {
+ for (i = 1; i < FUSB300_MAX_NUM_EP; i++) {
+ if (int_grp0 & FUSB300_IGR0_EPn_FIFO_INT(i)) {
+ reg = ioread32(fusb300->reg +
+ FUSB300_OFFSET_EPSET1(i));
+ in = (reg & FUSB300_EPSET1_DIRIN) ? 1 : 0;
+ if (in)
+ in_ep_fifo_handler(fusb300->ep[i]);
+ else
+ out_ep_fifo_handler(fusb300->ep[i]);
+ }
+ }
+ }
+
+ spin_unlock(&fusb300->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void fusb300_set_u2_timeout(struct fusb300 *fusb300,
+ u32 time)
+{
+ u32 reg;
+
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT);
+ reg &= ~0xff;
+ reg |= FUSB300_SSCR2_U2TIMEOUT(time);
+
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT);
+}
+
+static void fusb300_set_u1_timeout(struct fusb300 *fusb300,
+ u32 time)
+{
+ u32 reg;
+
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT);
+ reg &= ~(0xff << 8);
+ reg |= FUSB300_SSCR2_U1TIMEOUT(time);
+
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT);
+}
+
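+/* one-time controller setup: AHB split, HS LPM, U1/U2 timers, IRQ enables */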
+static void init_controller(struct fusb300 *fusb300)
+{
+ u32 reg;
+ u32 mask = 0;
+ u32 val = 0;
+
+ /* split on */
+ mask = val = FUSB300_AHBBCR_S0_SPLIT_ON | FUSB300_AHBBCR_S1_SPLIT_ON;
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_AHBCR);
+ reg &= ~mask;
+ reg |= val;
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_AHBCR);
+
+ /* enable high-speed LPM */
+ mask = val = FUSB300_HSCR_HS_LPM_PERMIT;
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_HSCR);
+ reg &= ~mask;
+ reg |= val;
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_HSCR);
+
+ /* set U1/U2 timeouts */
+ fusb300_set_u2_timeout(fusb300, 0xff);
+ fusb300_set_u1_timeout(fusb300, 0xff);
+
+ /* enable all grp1 interrupt */
+ iowrite32(0xcfffff9f, fusb300->reg + FUSB300_OFFSET_IGER1);
+}
+/*------------------------------------------------------------------------*/
+static struct fusb300 *the_controller;
+
+int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *))
+{
+ struct fusb300 *fusb300 = the_controller;
+ int retval;
+
+ if (!driver
+ || driver->speed < USB_SPEED_FULL
+ || !bind
+ || !driver->setup)
+ return -EINVAL;
+
+ if (!fusb300)
+ return -ENODEV;
+
+ if (fusb300->driver)
+ return -EBUSY;
+
+ /* hook up the driver */
+ driver->driver.bus = NULL;
+ fusb300->driver = driver;
+ fusb300->gadget.dev.driver = &driver->driver;
+
+ retval = device_add(&fusb300->gadget.dev);
+ if (retval) {
+ pr_err("device_add error (%d)\n", retval);
+ goto error;
+ }
+
+ retval = bind(&fusb300->gadget);
+ if (retval) {
+ pr_err("bind to driver error (%d)\n", retval);
+ device_del(&fusb300->gadget.dev);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ fusb300->driver = NULL;
+ fusb300->gadget.dev.driver = NULL;
+
+ return retval;
+}
+EXPORT_SYMBOL(usb_gadget_probe_driver);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ struct fusb300 *fusb300 = the_controller;
+
+ if (driver != fusb300->driver || !driver->unbind)
+ return -EINVAL;
+
+ driver->unbind(&fusb300->gadget);
+ fusb300->gadget.dev.driver = NULL;
+
+ init_controller(fusb300);
+ device_del(&fusb300->gadget.dev);
+ fusb300->driver = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+/*--------------------------------------------------------------------------*/
+
+static int fusb300_udc_pullup(struct usb_gadget *_gadget, int is_active)
+{
+ return 0;
+}
+
+static struct usb_gadget_ops fusb300_gadget_ops = {
+ .pullup = fusb300_udc_pullup,
+};
+
+static int __exit fusb300_remove(struct platform_device *pdev)
+{
+ struct fusb300 *fusb300 = dev_get_drvdata(&pdev->dev);
+
+ iounmap(fusb300->reg);
+ free_irq(platform_get_irq(pdev, 0), fusb300);
+
+ fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
+ kfree(fusb300);
+
+ return 0;
+}
+
+static int __init fusb300_probe(struct platform_device *pdev)
+{
+ struct resource *res, *ires, *ires1;
+ void __iomem *reg = NULL;
+ struct fusb300 *fusb300 = NULL;
+ struct fusb300_ep *_ep[FUSB300_MAX_NUM_EP];
+ int ret = 0;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ pr_err("platform_get_resource error.\n");
+ goto clean_up;
+ }
+
+ ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!ires) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev,
+ "platform_get_resource IORESOURCE_IRQ error.\n");
+ goto clean_up;
+ }
+
+ ires1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!ires1) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev,
+ "platform_get_resource IORESOURCE_IRQ 1 error.\n");
+ goto clean_up;
+ }
+
+ reg = ioremap(res->start, resource_size(res));
+ if (reg == NULL) {
+ ret = -ENOMEM;
+ pr_err("ioremap error.\n");
+ goto clean_up;
+ }
+
+ /* initialize udc */
+ fusb300 = kzalloc(sizeof(struct fusb300), GFP_KERNEL);
+ if (fusb300 == NULL) {
+ ret = -ENOMEM;
+ pr_err("kzalloc error\n");
+ goto clean_up;
+ }
+
+ for (i = 0; i < FUSB300_MAX_NUM_EP; i++) {
+ _ep[i] = kzalloc(sizeof(struct fusb300_ep), GFP_KERNEL);
+ if (_ep[i] == NULL) {
+ ret = -ENOMEM;
+ pr_err("_ep kzalloc error\n");
+ goto clean_up;
+ }
+ fusb300->ep[i] = _ep[i];
+ }
+
+ spin_lock_init(&fusb300->lock);
+
+ dev_set_drvdata(&pdev->dev, fusb300);
+
+ fusb300->gadget.ops = &fusb300_gadget_ops;
+
+ device_initialize(&fusb300->gadget.dev);
+
+ dev_set_name(&fusb300->gadget.dev, "gadget");
+
+ fusb300->gadget.is_dualspeed = 1;
+ fusb300->gadget.dev.parent = &pdev->dev;
+ fusb300->gadget.dev.dma_mask = pdev->dev.dma_mask;
+ fusb300->gadget.dev.release = pdev->dev.release;
+ fusb300->gadget.name = udc_name;
+ fusb300->reg = reg;
+
+ ret = request_irq(ires->start, fusb300_irq, IRQF_DISABLED | IRQF_SHARED,
+ udc_name, fusb300);
+ if (ret < 0) {
+ pr_err("request_irq error (%d)\n", ret);
+ goto clean_up;
+ }
+
+ ret = request_irq(ires1->start, fusb300_irq,
+ IRQF_DISABLED | IRQF_SHARED, udc_name, fusb300);
+ if (ret < 0) {
+ pr_err("request_irq1 error (%d)\n", ret);
+ goto clean_up;
+ }
+
+ INIT_LIST_HEAD(&fusb300->gadget.ep_list);
+
+ for (i = 0; i < FUSB300_MAX_NUM_EP ; i++) {
+ struct fusb300_ep *ep = fusb300->ep[i];
+
+ if (i != 0) {
+ INIT_LIST_HEAD(&fusb300->ep[i]->ep.ep_list);
+ list_add_tail(&fusb300->ep[i]->ep.ep_list,
+ &fusb300->gadget.ep_list);
+ }
+ ep->fusb300 = fusb300;
+ INIT_LIST_HEAD(&ep->queue);
+ ep->ep.name = fusb300_ep_name[i];
+ ep->ep.ops = &fusb300_ep_ops;
+ ep->ep.maxpacket = HS_BULK_MAX_PACKET_SIZE;
+ }
+ fusb300->ep[0]->ep.maxpacket = HS_CTL_MAX_PACKET_SIZE;
+ fusb300->ep[0]->epnum = 0;
+ fusb300->gadget.ep0 = &fusb300->ep[0]->ep;
+ INIT_LIST_HEAD(&fusb300->gadget.ep0->ep_list);
+
+ the_controller = fusb300;
+
+ fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep,
+ GFP_KERNEL);
+ if (fusb300->ep0_req == NULL) {
+ ret = -ENOMEM;
+ goto clean_up3;
+ }
+
+ init_controller(fusb300);
+ dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
+
+ return 0;
+
+clean_up3:
+ free_irq(ires1->start, fusb300);
+ free_irq(ires->start, fusb300);
+
+clean_up:
+ if (fusb300) {
+ if (fusb300->ep0_req)
+ fusb300_free_request(&fusb300->ep[0]->ep,
+ fusb300->ep0_req);
+ kfree(fusb300);
+ }
+ if (reg)
+ iounmap(reg);
+
+ return ret;
+}
+
+static struct platform_driver fusb300_driver = {
+ .remove = __exit_p(fusb300_remove),
+ .driver = {
+ .name = (char *) udc_name,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init fusb300_udc_init(void)
+{
+ return platform_driver_probe(&fusb300_driver, fusb300_probe);
+}
+
+module_init(fusb300_udc_init);
+
+static void __exit fusb300_udc_cleanup(void)
+{
+ platform_driver_unregister(&fusb300_driver);
+}
+module_exit(fusb300_udc_cleanup);
diff --git a/drivers/usb/gadget/fusb300_udc.h b/drivers/usb/gadget/fusb300_udc.h
new file mode 100644
index 00000000000..f51aa2ef1f9
--- /dev/null
+++ b/drivers/usb/gadget/fusb300_udc.h
@@ -0,0 +1,687 @@
+/*
+ * Fusb300 UDC (USB gadget)
+ *
+ * Copyright (C) 2010 Faraday Technology Corp.
+ *
+ * Author : Yuan-hsin Chen <yhchen@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+
+#ifndef __FUSB300_UDC_H__
+#define __FUSB300_UDC_H__
+
+#include <linux/kernel.h>
+
+#define FUSB300_OFFSET_GCR 0x00
+#define FUSB300_OFFSET_GTM 0x04
+#define FUSB300_OFFSET_DAR 0x08
+#define FUSB300_OFFSET_CSR 0x0C
+#define FUSB300_OFFSET_CXPORT 0x10
+#define FUSB300_OFFSET_EPSET0(n) (0x20 + (n - 1) * 0x30)
+#define FUSB300_OFFSET_EPSET1(n) (0x24 + (n - 1) * 0x30)
+#define FUSB300_OFFSET_EPSET2(n) (0x28 + (n - 1) * 0x30)
+#define FUSB300_OFFSET_EPFFR(n) (0x2c + (n - 1) * 0x30)
+#define FUSB300_OFFSET_EPSTRID(n) (0x40 + (n - 1) * 0x30)
+#define FUSB300_OFFSET_HSPTM 0x300
+#define FUSB300_OFFSET_HSCR 0x304
+#define FUSB300_OFFSET_SSCR0 0x308
+#define FUSB300_OFFSET_SSCR1 0x30C
+#define FUSB300_OFFSET_TT 0x310
+#define FUSB300_OFFSET_DEVNOTF 0x314
+#define FUSB300_OFFSET_DNC1 0x318
+#define FUSB300_OFFSET_CS 0x31C
+#define FUSB300_OFFSET_SOF 0x324
+#define FUSB300_OFFSET_EFCS 0x328
+#define FUSB300_OFFSET_IGR0 0x400
+#define FUSB300_OFFSET_IGR1 0x404
+#define FUSB300_OFFSET_IGR2 0x408
+#define FUSB300_OFFSET_IGR3 0x40C
+#define FUSB300_OFFSET_IGR4 0x410
+#define FUSB300_OFFSET_IGR5 0x414
+#define FUSB300_OFFSET_IGER0 0x420
+#define FUSB300_OFFSET_IGER1 0x424
+#define FUSB300_OFFSET_IGER2 0x428
+#define FUSB300_OFFSET_IGER3 0x42C
+#define FUSB300_OFFSET_IGER4 0x430
+#define FUSB300_OFFSET_IGER5 0x434
+#define FUSB300_OFFSET_DMAHMER 0x500
+#define FUSB300_OFFSET_EPPRDRDY 0x504
+#define FUSB300_OFFSET_DMAEPMR 0x508
+#define FUSB300_OFFSET_DMAENR 0x50C
+#define FUSB300_OFFSET_DMAAPR 0x510
+#define FUSB300_OFFSET_AHBCR 0x514
+#define FUSB300_OFFSET_EPPRD_W0(n) (0x520 + (n - 1) * 0x10)
+#define FUSB300_OFFSET_EPPRD_W1(n) (0x524 + (n - 1) * 0x10)
+#define FUSB300_OFFSET_EPPRD_W2(n) (0x528 + (n - 1) * 0x10)
+#define FUSB300_OFFSET_EPRD_PTR(n) (0x52C + (n - 1) * 0x10)
+#define FUSB300_OFFSET_BUFDBG_START 0x800
+#define FUSB300_OFFSET_BUFDBG_END 0xBFC
+#define FUSB300_OFFSET_EPPORT(n) (0x1010 + (n - 1) * 0x10)
+
+/*
+ * * Global Control Register (offset = 000H)
+ * */
+#define FUSB300_GCR_SF_RST (1 << 8)
+#define FUSB300_GCR_VBUS_STATUS (1 << 7)
+#define FUSB300_GCR_FORCE_HS_SUSP (1 << 6)
+#define FUSB300_GCR_SYNC_FIFO1_CLR (1 << 5)
+#define FUSB300_GCR_SYNC_FIFO0_CLR (1 << 4)
+#define FUSB300_GCR_FIFOCLR (1 << 3)
+#define FUSB300_GCR_GLINTEN (1 << 2)
+#define FUSB300_GCR_DEVEN_FS 0x3
+#define FUSB300_GCR_DEVEN_HS 0x2
+#define FUSB300_GCR_DEVEN_SS 0x1
+#define FUSB300_GCR_DEVDIS 0x0
+#define FUSB300_GCR_DEVEN_MSK 0x3
+
+
+/*
+ * *Global Test Mode (offset = 004H)
+ * */
+#define FUSB300_GTM_TST_DIS_SOFGEN (1 << 16)
+#define FUSB300_GTM_TST_CUR_EP_ENTRY(n) ((n & 0xF) << 12)
+#define FUSB300_GTM_TST_EP_ENTRY(n) ((n & 0xF) << 8)
+#define FUSB300_GTM_TST_EP_NUM(n) ((n & 0xF) << 4)
+#define FUSB300_GTM_TST_FIFO_DEG (1 << 1)
+#define FUSB300_GTM_TSTMODE (1 << 0)
+
+/*
+ * * Device Address Register (offset = 008H)
+ * */
+#define FUSB300_DAR_SETCONFG (1 << 7)
+#define FUSB300_DAR_DRVADDR(x) (x & 0x7F)
+#define FUSB300_DAR_DRVADDR_MSK 0x7F
+
+/*
+ * *Control Transfer Configuration and Status Register
+ * (CX_Config_Status, offset = 00CH)
+ * */
+#define FUSB300_CSR_LEN(x) ((x & 0xFFFF) << 8)
+#define FUSB300_CSR_LEN_MSK (0xFFFF << 8)
+#define FUSB300_CSR_EMP (1 << 4)
+#define FUSB300_CSR_FUL (1 << 3)
+#define FUSB300_CSR_CLR (1 << 2)
+#define FUSB300_CSR_STL (1 << 1)
+#define FUSB300_CSR_DONE (1 << 0)
+
+/*
+ * * EPn Setting 0 (EPn_SET0, offset = 020H+(n-1)*30H, n=1~15 )
+ * */
+#define FUSB300_EPSET0_CLRSEQNUM (1 << 2)
+#define FUSB300_EPSET0_EPn_TX0BYTE (1 << 1)
+#define FUSB300_EPSET0_STL (1 << 0)
+
+/*
+ * * EPn Setting 1 (EPn_SET1, offset = 024H+(n-1)*30H, n=1~15)
+ * */
+#define FUSB300_EPSET1_START_ENTRY(x) ((x & 0xFF) << 24)
+#define FUSB300_EPSET1_START_ENTRY_MSK (0xFF << 24)
+#define FUSB300_EPSET1_FIFOENTRY(x) ((x & 0x1F) << 12)
+#define FUSB300_EPSET1_FIFOENTRY_MSK (0x1f << 12)
+#define FUSB300_EPSET1_INTERVAL(x) ((x & 0x7) << 6)
+#define FUSB300_EPSET1_BWNUM(x) ((x & 0x3) << 4)
+#define FUSB300_EPSET1_TYPEISO (1 << 2)
+#define FUSB300_EPSET1_TYPEBLK (2 << 2)
+#define FUSB300_EPSET1_TYPEINT (3 << 2)
+#define FUSB300_EPSET1_TYPE(x) ((x & 0x3) << 2)
+#define FUSB300_EPSET1_TYPE_MSK (0x3 << 2)
+#define FUSB300_EPSET1_DIROUT (0 << 1)
+#define FUSB300_EPSET1_DIRIN (1 << 1)
+#define FUSB300_EPSET1_DIR(x) ((x & 0x1) << 1)
+#define FUSB300_EPSET1_DIR_MSK ((0x1) << 1)
+#define FUSB300_EPSET1_ACTDIS 0
+#define FUSB300_EPSET1_ACTEN 1
+
+/*
+ * *EPn Setting 2 (EPn_SET2, offset = 028H+(n-1)*30H, n=1~15)
+ * */
+#define FUSB300_EPSET2_ADDROFS(x) ((x & 0x7FFF) << 16)
+#define FUSB300_EPSET2_ADDROFS_MSK (0x7fff << 16)
+#define FUSB300_EPSET2_MPS(x) (x & 0x7FF)
+#define FUSB300_EPSET2_MPS_MSK 0x7FF
+
+/*
+ * * EPn FIFO Register (offset = 2cH+(n-1)*30H)
+ * */
+#define FUSB300_FFR_RST (1 << 31)
+#define FUSB300_FF_FUL (1 << 30)
+#define FUSB300_FF_EMPTY (1 << 29)
+#define FUSB300_FFR_BYCNT 0x1FFFF
+
+/*
+ * *EPn Stream ID (EPn_STR_ID, offset = 040H+(n-1)*30H, n=1~15)
+ * */
+#define FUSB300_STRID_STREN (1 << 16)
+#define FUSB300_STRID_STRID(x) (x & 0xFFFF)
+
+/*
+ * *HS PHY Test Mode (offset = 300H)
+ * */
+#define FUSB300_HSPTM_TSTPKDONE (1 << 4)
+#define FUSB300_HSPTM_TSTPKT (1 << 3)
+#define FUSB300_HSPTM_TSTSET0NAK (1 << 2)
+#define FUSB300_HSPTM_TSTKSTA (1 << 1)
+#define FUSB300_HSPTM_TSTJSTA (1 << 0)
+
+/*
+ * *HS Control Register (offset = 304H)
+ * */
+#define FUSB300_HSCR_HS_LPM_PERMIT (1 << 8)
+#define FUSB300_HSCR_HS_LPM_RMWKUP (1 << 7)
+#define FUSB300_HSCR_CAP_LPM_RMWKUP (1 << 6)
+#define FUSB300_HSCR_HS_GOSUSP (1 << 5)
+#define FUSB300_HSCR_HS_GORMWKU (1 << 4)
+#define FUSB300_HSCR_CAP_RMWKUP (1 << 3)
+#define FUSB300_HSCR_IDLECNT_0MS 0
+#define FUSB300_HSCR_IDLECNT_1MS 1
+#define FUSB300_HSCR_IDLECNT_2MS 2
+#define FUSB300_HSCR_IDLECNT_3MS 3
+#define FUSB300_HSCR_IDLECNT_4MS 4
+#define FUSB300_HSCR_IDLECNT_5MS 5
+#define FUSB300_HSCR_IDLECNT_6MS 6
+#define FUSB300_HSCR_IDLECNT_7MS 7
+
+/*
+ * * SS Controller Register 0 (offset = 308H)
+ * */
+#define FUSB300_SSCR0_MAX_INTERVAL(x) ((x & 0x7) << 4)
+#define FUSB300_SSCR0_U2_FUN_EN (1 << 1)
+#define FUSB300_SSCR0_U1_FUN_EN (1 << 0)
+
+/*
+ * * SS Controller Register 1 (offset = 30CH)
+ * */
+#define FUSB300_SSCR1_GO_U3_DONE (1 << 8)
+#define FUSB300_SSCR1_TXDEEMPH_LEVEL (1 << 7)
+#define FUSB300_SSCR1_DIS_SCRMB (1 << 6)
+#define FUSB300_SSCR1_FORCE_RECOVERY (1 << 5)
+#define FUSB300_SSCR1_U3_WAKEUP_EN (1 << 4)
+#define FUSB300_SSCR1_U2_EXIT_EN (1 << 3)
+#define FUSB300_SSCR1_U1_EXIT_EN (1 << 2)
+#define FUSB300_SSCR1_U2_ENTRY_EN (1 << 1)
+#define FUSB300_SSCR1_U1_ENTRY_EN (1 << 0)
+
+/*
+ * *SS Controller Register 2 (offset = 310H)
+ * */
+#define FUSB300_SSCR2_SS_TX_SWING (1 << 25)
+#define FUSB300_SSCR2_FORCE_LINKPM_ACCEPT (1 << 24)
+#define FUSB300_SSCR2_U2_INACT_TIMEOUT(x) ((x & 0xFF) << 16)
+#define FUSB300_SSCR2_U1TIMEOUT(x) ((x & 0xFF) << 8)
+#define FUSB300_SSCR2_U2TIMEOUT(x) (x & 0xFF)
+
+/*
+ * *SS Device Notification Control (DEV_NOTF, offset = 314H)
+ * */
+#define FUSB300_DEVNOTF_CONTEXT0(x) ((x & 0xFFFFFF) << 8)
+#define FUSB300_DEVNOTF_TYPE_DIS 0
+#define FUSB300_DEVNOTF_TYPE_FUNCWAKE 1
+#define FUSB300_DEVNOTF_TYPE_LTM 2
+#define FUSB300_DEVNOTF_TYPE_BUSINT_ADJMSG 3
+
+/*
+ * *BFM Arbiter Priority Register (BFM_ARB offset = 31CH)
+ * */
+#define FUSB300_BFMARB_ARB_M1 (1 << 3)
+#define FUSB300_BFMARB_ARB_M0 (1 << 2)
+#define FUSB300_BFMARB_ARB_S1 (1 << 1)
+#define FUSB300_BFMARB_ARB_S0 1
+
+/*
+ * *Vendor Specific IO Control Register (offset = 320H)
+ * */
+#define FUSB300_VSIC_VCTLOAD_N (1 << 8)
+#define FUSB300_VSIC_VCTL(x) (x & 0x3F)
+
+/*
+ * *SOF Mask Timer (offset = 324H)
+ * */
+#define FUSB300_SOF_MASK_TIMER_HS 0x044c
+#define FUSB300_SOF_MASK_TIMER_FS 0x2710
+
+/*
+ * *Error Flag and Control Status (offset = 328H)
+ * */
+#define FUSB300_EFCS_PM_STATE_U3 3
+#define FUSB300_EFCS_PM_STATE_U2 2
+#define FUSB300_EFCS_PM_STATE_U1 1
+#define FUSB300_EFCS_PM_STATE_U0 0
+
+/*
+ * *Interrupt Group 0 Register (offset = 400H)
+ * */
+#define FUSB300_IGR0_EP15_PRD_INT (1 << 31)
+#define FUSB300_IGR0_EP14_PRD_INT (1 << 30)
+#define FUSB300_IGR0_EP13_PRD_INT (1 << 29)
+#define FUSB300_IGR0_EP12_PRD_INT (1 << 28)
+#define FUSB300_IGR0_EP11_PRD_INT (1 << 27)
+#define FUSB300_IGR0_EP10_PRD_INT (1 << 26)
+#define FUSB300_IGR0_EP9_PRD_INT (1 << 25)
+#define FUSB300_IGR0_EP8_PRD_INT (1 << 24)
+#define FUSB300_IGR0_EP7_PRD_INT (1 << 23)
+#define FUSB300_IGR0_EP6_PRD_INT (1 << 22)
+#define FUSB300_IGR0_EP5_PRD_INT (1 << 21)
+#define FUSB300_IGR0_EP4_PRD_INT (1 << 20)
+#define FUSB300_IGR0_EP3_PRD_INT (1 << 19)
+#define FUSB300_IGR0_EP2_PRD_INT (1 << 18)
+#define FUSB300_IGR0_EP1_PRD_INT (1 << 17)
+#define FUSB300_IGR0_EPn_PRD_INT(n) (1 << (n + 16))
+
+#define FUSB300_IGR0_EP15_FIFO_INT (1 << 15)
+#define FUSB300_IGR0_EP14_FIFO_INT (1 << 14)
+#define FUSB300_IGR0_EP13_FIFO_INT (1 << 13)
+#define FUSB300_IGR0_EP12_FIFO_INT (1 << 12)
+#define FUSB300_IGR0_EP11_FIFO_INT (1 << 11)
+#define FUSB300_IGR0_EP10_FIFO_INT (1 << 10)
+#define FUSB300_IGR0_EP9_FIFO_INT (1 << 9)
+#define FUSB300_IGR0_EP8_FIFO_INT (1 << 8)
+#define FUSB300_IGR0_EP7_FIFO_INT (1 << 7)
+#define FUSB300_IGR0_EP6_FIFO_INT (1 << 6)
+#define FUSB300_IGR0_EP5_FIFO_INT (1 << 5)
+#define FUSB300_IGR0_EP4_FIFO_INT (1 << 4)
+#define FUSB300_IGR0_EP3_FIFO_INT (1 << 3)
+#define FUSB300_IGR0_EP2_FIFO_INT (1 << 2)
+#define FUSB300_IGR0_EP1_FIFO_INT (1 << 1)
+#define FUSB300_IGR0_EPn_FIFO_INT(n) (1 << n)
+
+/*
+ * *Interrupt Group 1 Register (offset = 404H)
+ * */
+#define FUSB300_IGR1_INTGRP5 (1 << 31)
+#define FUSB300_IGR1_VBUS_CHG_INT (1 << 30)
+#define FUSB300_IGR1_SYNF1_EMPTY_INT (1 << 29)
+#define FUSB300_IGR1_SYNF0_EMPTY_INT (1 << 28)
+#define FUSB300_IGR1_U3_EXIT_FAIL_INT (1 << 27)
+#define FUSB300_IGR1_U2_EXIT_FAIL_INT (1 << 26)
+#define FUSB300_IGR1_U1_EXIT_FAIL_INT (1 << 25)
+#define FUSB300_IGR1_U2_ENTRY_FAIL_INT (1 << 24)
+#define FUSB300_IGR1_U1_ENTRY_FAIL_INT (1 << 23)
+#define FUSB300_IGR1_U3_EXIT_INT (1 << 22)
+#define FUSB300_IGR1_U2_EXIT_INT (1 << 21)
+#define FUSB300_IGR1_U1_EXIT_INT (1 << 20)
+#define FUSB300_IGR1_U3_ENTRY_INT (1 << 19)
+#define FUSB300_IGR1_U2_ENTRY_INT (1 << 18)
+#define FUSB300_IGR1_U1_ENTRY_INT (1 << 17)
+#define FUSB300_IGR1_HOT_RST_INT (1 << 16)
+#define FUSB300_IGR1_WARM_RST_INT (1 << 15)
+#define FUSB300_IGR1_RESM_INT (1 << 14)
+#define FUSB300_IGR1_SUSP_INT (1 << 13)
+#define FUSB300_IGR1_HS_LPM_INT (1 << 12)
+#define FUSB300_IGR1_USBRST_INT (1 << 11)
+#define FUSB300_IGR1_DEV_MODE_CHG_INT (1 << 9)
+#define FUSB300_IGR1_CX_COMABT_INT (1 << 8)
+#define FUSB300_IGR1_CX_COMFAIL_INT (1 << 7)
+#define FUSB300_IGR1_CX_CMDEND_INT (1 << 6)
+#define FUSB300_IGR1_CX_OUT_INT (1 << 5)
+#define FUSB300_IGR1_CX_IN_INT (1 << 4)
+#define FUSB300_IGR1_CX_SETUP_INT (1 << 3)
+#define FUSB300_IGR1_INTGRP4 (1 << 2)
+#define FUSB300_IGR1_INTGRP3 (1 << 1)
+#define FUSB300_IGR1_INTGRP2 (1 << 0)
+
+/*
+ * *Interrupt Group 2 Register (offset = 408H)
+ * */
+#define FUSB300_IGR2_EP6_STR_ACCEPT_INT (1 << 29)
+#define FUSB300_IGR2_EP6_STR_RESUME_INT (1 << 28)
+#define FUSB300_IGR2_EP6_STR_REQ_INT (1 << 27)
+#define FUSB300_IGR2_EP6_STR_NOTRDY_INT (1 << 26)
+#define FUSB300_IGR2_EP6_STR_PRIME_INT (1 << 25)
+#define FUSB300_IGR2_EP5_STR_ACCEPT_INT (1 << 24)
+#define FUSB300_IGR2_EP5_STR_RESUME_INT (1 << 23)
+#define FUSB300_IGR2_EP5_STR_REQ_INT (1 << 22)
+#define FUSB300_IGR2_EP5_STR_NOTRDY_INT (1 << 21)
+#define FUSB300_IGR2_EP5_STR_PRIME_INT (1 << 20)
+#define FUSB300_IGR2_EP4_STR_ACCEPT_INT (1 << 19)
+#define FUSB300_IGR2_EP4_STR_RESUME_INT (1 << 18)
+#define FUSB300_IGR2_EP4_STR_REQ_INT (1 << 17)
+#define FUSB300_IGR2_EP4_STR_NOTRDY_INT (1 << 16)
+#define FUSB300_IGR2_EP4_STR_PRIME_INT (1 << 15)
+#define FUSB300_IGR2_EP3_STR_ACCEPT_INT (1 << 14)
+#define FUSB300_IGR2_EP3_STR_RESUME_INT (1 << 13)
+#define FUSB300_IGR2_EP3_STR_REQ_INT (1 << 12)
+#define FUSB300_IGR2_EP3_STR_NOTRDY_INT (1 << 11)
+#define FUSB300_IGR2_EP3_STR_PRIME_INT (1 << 10)
+#define FUSB300_IGR2_EP2_STR_ACCEPT_INT (1 << 9)
+#define FUSB300_IGR2_EP2_STR_RESUME_INT (1 << 8)
+#define FUSB300_IGR2_EP2_STR_REQ_INT (1 << 7)
+#define FUSB300_IGR2_EP2_STR_NOTRDY_INT (1 << 6)
+#define FUSB300_IGR2_EP2_STR_PRIME_INT (1 << 5)
+#define FUSB300_IGR2_EP1_STR_ACCEPT_INT (1 << 4)
+#define FUSB300_IGR2_EP1_STR_RESUME_INT (1 << 3)
+#define FUSB300_IGR2_EP1_STR_REQ_INT (1 << 2)
+#define FUSB300_IGR2_EP1_STR_NOTRDY_INT (1 << 1)
+#define FUSB300_IGR2_EP1_STR_PRIME_INT (1 << 0)
+
+#define FUSB300_IGR2_EP_STR_ACCEPT_INT(n) (1 << (5 * n - 1))
+#define FUSB300_IGR2_EP_STR_RESUME_INT(n) (1 << (5 * n - 2))
+#define FUSB300_IGR2_EP_STR_REQ_INT(n) (1 << (5 * n - 3))
+#define FUSB300_IGR2_EP_STR_NOTRDY_INT(n) (1 << (5 * n - 4))
+#define FUSB300_IGR2_EP_STR_PRIME_INT(n) (1 << (5 * n - 5))
+
+/*
+ * *Interrupt Group 3 Register (offset = 40CH)
+ * */
+#define FUSB300_IGR3_EP12_STR_ACCEPT_INT (1 << 29)
+#define FUSB300_IGR3_EP12_STR_RESUME_INT (1 << 28)
+#define FUSB300_IGR3_EP12_STR_REQ_INT (1 << 27)
+#define FUSB300_IGR3_EP12_STR_NOTRDY_INT (1 << 26)
+#define FUSB300_IGR3_EP12_STR_PRIME_INT (1 << 25)
+#define FUSB300_IGR3_EP11_STR_ACCEPT_INT (1 << 24)
+#define FUSB300_IGR3_EP11_STR_RESUME_INT (1 << 23)
+#define FUSB300_IGR3_EP11_STR_REQ_INT (1 << 22)
+#define FUSB300_IGR3_EP11_STR_NOTRDY_INT (1 << 21)
+#define FUSB300_IGR3_EP11_STR_PRIME_INT (1 << 20)
+#define FUSB300_IGR3_EP10_STR_ACCEPT_INT (1 << 19)
+#define FUSB300_IGR3_EP10_STR_RESUME_INT (1 << 18)
+#define FUSB300_IGR3_EP10_STR_REQ_INT (1 << 17)
+#define FUSB300_IGR3_EP10_STR_NOTRDY_INT (1 << 16)
+#define FUSB300_IGR3_EP10_STR_PRIME_INT (1 << 15)
+#define FUSB300_IGR3_EP9_STR_ACCEPT_INT (1 << 14)
+#define FUSB300_IGR3_EP9_STR_RESUME_INT (1 << 13)
+#define FUSB300_IGR3_EP9_STR_REQ_INT (1 << 12)
+#define FUSB300_IGR3_EP9_STR_NOTRDY_INT (1 << 11)
+#define FUSB300_IGR3_EP9_STR_PRIME_INT (1 << 10)
+#define FUSB300_IGR3_EP8_STR_ACCEPT_INT (1 << 9)
+#define FUSB300_IGR3_EP8_STR_RESUME_INT (1 << 8)
+#define FUSB300_IGR3_EP8_STR_REQ_INT (1 << 7)
+#define FUSB300_IGR3_EP8_STR_NOTRDY_INT (1 << 6)
+#define FUSB300_IGR3_EP8_STR_PRIME_INT (1 << 5)
+#define FUSB300_IGR3_EP7_STR_ACCEPT_INT (1 << 4)
+#define FUSB300_IGR3_EP7_STR_RESUME_INT (1 << 3)
+#define FUSB300_IGR3_EP7_STR_REQ_INT (1 << 2)
+#define FUSB300_IGR3_EP7_STR_NOTRDY_INT (1 << 1)
+#define FUSB300_IGR3_EP7_STR_PRIME_INT (1 << 0)
+
+#define FUSB300_IGR3_EP_STR_ACCEPT_INT(n) (1 << (5 * (n - 6) - 1))
+#define FUSB300_IGR3_EP_STR_RESUME_INT(n) (1 << (5 * (n - 6) - 2))
+#define FUSB300_IGR3_EP_STR_REQ_INT(n) (1 << (5 * (n - 6) - 3))
+#define FUSB300_IGR3_EP_STR_NOTRDY_INT(n) (1 << (5 * (n - 6) - 4))
+#define FUSB300_IGR3_EP_STR_PRIME_INT(n) (1 << (5 * (n - 6) - 5))
+
+/*
+ * *Interrupt Group 4 Register (offset = 410H)
+ * */
+#define FUSB300_IGR4_EP15_RX0_INT (1 << 31)
+#define FUSB300_IGR4_EP14_RX0_INT (1 << 30)
+#define FUSB300_IGR4_EP13_RX0_INT (1 << 29)
+#define FUSB300_IGR4_EP12_RX0_INT (1 << 28)
+#define FUSB300_IGR4_EP11_RX0_INT (1 << 27)
+#define FUSB300_IGR4_EP10_RX0_INT (1 << 26)
+#define FUSB300_IGR4_EP9_RX0_INT (1 << 25)
+#define FUSB300_IGR4_EP8_RX0_INT (1 << 24)
+#define FUSB300_IGR4_EP7_RX0_INT (1 << 23)
+#define FUSB300_IGR4_EP6_RX0_INT (1 << 22)
+#define FUSB300_IGR4_EP5_RX0_INT (1 << 21)
+#define FUSB300_IGR4_EP4_RX0_INT (1 << 20)
+#define FUSB300_IGR4_EP3_RX0_INT (1 << 19)
+#define FUSB300_IGR4_EP2_RX0_INT (1 << 18)
+#define FUSB300_IGR4_EP1_RX0_INT (1 << 17)
+#define FUSB300_IGR4_EP_RX0_INT(x) (1 << (x + 16))
+#define FUSB300_IGR4_EP15_STR_ACCEPT_INT (1 << 14)
+#define FUSB300_IGR4_EP15_STR_RESUME_INT (1 << 13)
+#define FUSB300_IGR4_EP15_STR_REQ_INT (1 << 12)
+#define FUSB300_IGR4_EP15_STR_NOTRDY_INT (1 << 11)
+#define FUSB300_IGR4_EP15_STR_PRIME_INT (1 << 10)
+#define FUSB300_IGR4_EP14_STR_ACCEPT_INT (1 << 9)
+#define FUSB300_IGR4_EP14_STR_RESUME_INT (1 << 8)
+#define FUSB300_IGR4_EP14_STR_REQ_INT (1 << 7)
+#define FUSB300_IGR4_EP14_STR_NOTRDY_INT (1 << 6)
+#define FUSB300_IGR4_EP14_STR_PRIME_INT (1 << 5)
+#define FUSB300_IGR4_EP13_STR_ACCEPT_INT (1 << 4)
+#define FUSB300_IGR4_EP13_STR_RESUME_INT (1 << 3)
+#define FUSB300_IGR4_EP13_STR_REQ_INT (1 << 2)
+#define FUSB300_IGR4_EP13_STR_NOTRDY_INT (1 << 1)
+#define FUSB300_IGR4_EP13_STR_PRIME_INT (1 << 0)
+
+#define FUSB300_IGR4_EP_STR_ACCEPT_INT(n) (1 << (5 * (n - 12) - 1))
+#define FUSB300_IGR4_EP_STR_RESUME_INT(n) (1 << (5 * (n - 12) - 2))
+#define FUSB300_IGR4_EP_STR_REQ_INT(n) (1 << (5 * (n - 12) - 3))
+#define FUSB300_IGR4_EP_STR_NOTRDY_INT(n) (1 << (5 * (n - 12) - 4))
+#define FUSB300_IGR4_EP_STR_PRIME_INT(n) (1 << (5 * (n - 12) - 5))
+
+/*
+ * *Interrupt Group 5 Register (offset = 414H)
+ * */
+#define FUSB300_IGR5_EP_STL_INT(n) (1 << n)
+
+/*
+ * *Interrupt Enable Group 0 Register (offset = 420H)
+ * */
+#define FUSB300_IGER0_EEP15_PRD_INT (1 << 31)
+#define FUSB300_IGER0_EEP14_PRD_INT (1 << 30)
+#define FUSB300_IGER0_EEP13_PRD_INT (1 << 29)
+#define FUSB300_IGER0_EEP12_PRD_INT (1 << 28)
+#define FUSB300_IGER0_EEP11_PRD_INT (1 << 27)
+#define FUSB300_IGER0_EEP10_PRD_INT (1 << 26)
+#define FUSB300_IGER0_EEP9_PRD_INT (1 << 25)
+#define FUSB300_IGER0_EEP8_PRD_INT (1 << 24)
+#define FUSB300_IGER0_EEP7_PRD_INT (1 << 23)
+#define FUSB300_IGER0_EEP6_PRD_INT (1 << 22)
+#define FUSB300_IGER0_EEP5_PRD_INT (1 << 21)
+#define FUSB300_IGER0_EEP4_PRD_INT (1 << 20)
+#define FUSB300_IGER0_EEP3_PRD_INT (1 << 19)
+#define FUSB300_IGER0_EEP2_PRD_INT (1 << 18)
+#define FUSB300_IGER0_EEP1_PRD_INT (1 << 17)
+#define FUSB300_IGER0_EEPn_PRD_INT(n) (1 << (n + 16))
+
+#define FUSB300_IGER0_EEP15_FIFO_INT (1 << 15)
+#define FUSB300_IGER0_EEP14_FIFO_INT (1 << 14)
+#define FUSB300_IGER0_EEP13_FIFO_INT (1 << 13)
+#define FUSB300_IGER0_EEP12_FIFO_INT (1 << 12)
+#define FUSB300_IGER0_EEP11_FIFO_INT (1 << 11)
+#define FUSB300_IGER0_EEP10_FIFO_INT (1 << 10)
+#define FUSB300_IGER0_EEP9_FIFO_INT (1 << 9)
+#define FUSB300_IGER0_EEP8_FIFO_INT (1 << 8)
+#define FUSB300_IGER0_EEP7_FIFO_INT (1 << 7)
+#define FUSB300_IGER0_EEP6_FIFO_INT (1 << 6)
+#define FUSB300_IGER0_EEP5_FIFO_INT (1 << 5)
+#define FUSB300_IGER0_EEP4_FIFO_INT (1 << 4)
+#define FUSB300_IGER0_EEP3_FIFO_INT (1 << 3)
+#define FUSB300_IGER0_EEP2_FIFO_INT (1 << 2)
+#define FUSB300_IGER0_EEP1_FIFO_INT (1 << 1)
+#define FUSB300_IGER0_EEPn_FIFO_INT(n) (1 << n)
+
+/*
+ * Interrupt Enable Group 1 Register (offset = 424H)
+ */
+#define FUSB300_IGER1_EINT_GRP5 (1 << 31)
+#define FUSB300_IGER1_VBUS_CHG_INT (1 << 30)
+#define FUSB300_IGER1_SYNF1_EMPTY_INT (1 << 29)
+#define FUSB300_IGER1_SYNF0_EMPTY_INT (1 << 28)
+#define FUSB300_IGER1_U3_EXIT_FAIL_INT (1 << 27)
+#define FUSB300_IGER1_U2_EXIT_FAIL_INT (1 << 26)
+#define FUSB300_IGER1_U1_EXIT_FAIL_INT (1 << 25)
+#define FUSB300_IGER1_U2_ENTRY_FAIL_INT (1 << 24)
+#define FUSB300_IGER1_U1_ENTRY_FAIL_INT (1 << 23)
+#define FUSB300_IGER1_U3_EXIT_INT (1 << 22)
+#define FUSB300_IGER1_U2_EXIT_INT (1 << 21)
+#define FUSB300_IGER1_U1_EXIT_INT (1 << 20)
+#define FUSB300_IGER1_U3_ENTRY_INT (1 << 19)
+#define FUSB300_IGER1_U2_ENTRY_INT (1 << 18)
+#define FUSB300_IGER1_U1_ENTRY_INT (1 << 17)
+#define FUSB300_IGER1_HOT_RST_INT (1 << 16)
+#define FUSB300_IGER1_WARM_RST_INT (1 << 15)
+#define FUSB300_IGER1_RESM_INT (1 << 14)
+#define FUSB300_IGER1_SUSP_INT (1 << 13)
+#define FUSB300_IGER1_LPM_INT (1 << 12)
+#define FUSB300_IGER1_HS_RST_INT (1 << 11)
+#define FUSB300_IGER1_EDEV_MODE_CHG_INT (1 << 9)
+#define FUSB300_IGER1_CX_COMABT_INT (1 << 8)
+#define FUSB300_IGER1_CX_COMFAIL_INT (1 << 7)
+#define FUSB300_IGER1_CX_CMDEND_INT (1 << 6)
+#define FUSB300_IGER1_CX_OUT_INT (1 << 5)
+#define FUSB300_IGER1_CX_IN_INT (1 << 4)
+#define FUSB300_IGER1_CX_SETUP_INT (1 << 3)
+#define FUSB300_IGER1_INTGRP4 (1 << 2)
+#define FUSB300_IGER1_INTGRP3 (1 << 1)
+#define FUSB300_IGER1_INTGRP2 (1 << 0)
+
+/*
+ * Interrupt Enable Group 2 Register (offset = 428H)
+ */
+#define FUSB300_IGER2_EEP_STR_ACCEPT_INT(n) (1 << (5 * (n) - 1))
+#define FUSB300_IGER2_EEP_STR_RESUME_INT(n) (1 << (5 * (n) - 2))
+#define FUSB300_IGER2_EEP_STR_REQ_INT(n) (1 << (5 * (n) - 3))
+#define FUSB300_IGER2_EEP_STR_NOTRDY_INT(n) (1 << (5 * (n) - 4))
+#define FUSB300_IGER2_EEP_STR_PRIME_INT(n) (1 << (5 * (n) - 5))
+
+/*
+ * Interrupt Enable Group 3 Register (offset = 42CH)
+ */
+
+#define FUSB300_IGER3_EEP_STR_ACCEPT_INT(n) (1 << (5 * (n - 6) - 1))
+#define FUSB300_IGER3_EEP_STR_RESUME_INT(n) (1 << (5 * (n - 6) - 2))
+#define FUSB300_IGER3_EEP_STR_REQ_INT(n) (1 << (5 * (n - 6) - 3))
+#define FUSB300_IGER3_EEP_STR_NOTRDY_INT(n) (1 << (5 * (n - 6) - 4))
+#define FUSB300_IGER3_EEP_STR_PRIME_INT(n) (1 << (5 * (n - 6) - 5))
+
+/*
+ * Interrupt Enable Group 4 Register (offset = 430H)
+ */
+
+#define FUSB300_IGER4_EEP_RX0_INT(n) (1 << (n + 16))
+#define FUSB300_IGER4_EEP_STR_ACCEPT_INT(n) (1 << (5 * (n - 6) - 1))
+#define FUSB300_IGER4_EEP_STR_RESUME_INT(n) (1 << (5 * (n - 6) - 2))
+#define FUSB300_IGER4_EEP_STR_REQ_INT(n) (1 << (5 * (n - 6) - 3))
+#define FUSB300_IGER4_EEP_STR_NOTRDY_INT(n) (1 << (5 * (n - 6) - 4))
+#define FUSB300_IGER4_EEP_STR_PRIME_INT(n) (1 << (5 * (n - 6) - 5))
+
+/* EP PRD Ready (EP_PRD_RDY, offset = 504H) */
+
+#define FUSB300_EPPRDR_EP15_PRD_RDY (1 << 15)
+#define FUSB300_EPPRDR_EP14_PRD_RDY (1 << 14)
+#define FUSB300_EPPRDR_EP13_PRD_RDY (1 << 13)
+#define FUSB300_EPPRDR_EP12_PRD_RDY (1 << 12)
+#define FUSB300_EPPRDR_EP11_PRD_RDY (1 << 11)
+#define FUSB300_EPPRDR_EP10_PRD_RDY (1 << 10)
+#define FUSB300_EPPRDR_EP9_PRD_RDY (1 << 9)
+#define FUSB300_EPPRDR_EP8_PRD_RDY (1 << 8)
+#define FUSB300_EPPRDR_EP7_PRD_RDY (1 << 7)
+#define FUSB300_EPPRDR_EP6_PRD_RDY (1 << 6)
+#define FUSB300_EPPRDR_EP5_PRD_RDY (1 << 5)
+#define FUSB300_EPPRDR_EP4_PRD_RDY (1 << 4)
+#define FUSB300_EPPRDR_EP3_PRD_RDY (1 << 3)
+#define FUSB300_EPPRDR_EP2_PRD_RDY (1 << 2)
+#define FUSB300_EPPRDR_EP1_PRD_RDY (1 << 1)
+#define FUSB300_EPPRDR_EP_PRD_RDY(n) (1 << n)
+
+/* AHB Bus Control Register (offset = 514H) */
+#define FUSB300_AHBBCR_S1_SPLIT_ON (1 << 17)
+#define FUSB300_AHBBCR_S0_SPLIT_ON (1 << 16)
+#define FUSB300_AHBBCR_S1_1entry (0 << 12)
+#define FUSB300_AHBBCR_S1_4entry (3 << 12)
+#define FUSB300_AHBBCR_S1_8entry (5 << 12)
+#define FUSB300_AHBBCR_S1_16entry (7 << 12)
+#define FUSB300_AHBBCR_S0_1entry (0 << 8)
+#define FUSB300_AHBBCR_S0_4entry (3 << 8)
+#define FUSB300_AHBBCR_S0_8entry (5 << 8)
+#define FUSB300_AHBBCR_S0_16entry (7 << 8)
+#define FUSB300_AHBBCR_M1_BURST_SINGLE (0 << 4)
+#define FUSB300_AHBBCR_M1_BURST_INCR (1 << 4)
+#define FUSB300_AHBBCR_M1_BURST_INCR4 (3 << 4)
+#define FUSB300_AHBBCR_M1_BURST_INCR8 (5 << 4)
+#define FUSB300_AHBBCR_M1_BURST_INCR16 (7 << 4)
+#define FUSB300_AHBBCR_M0_BURST_SINGLE 0
+#define FUSB300_AHBBCR_M0_BURST_INCR 1
+#define FUSB300_AHBBCR_M0_BURST_INCR4 3
+#define FUSB300_AHBBCR_M0_BURST_INCR8 5
+#define FUSB300_AHBBCR_M0_BURST_INCR16 7
+
+/* Interrupt Enable Group 5 Register */
+#define FUSB300_IGER5_EEP_STL_INT(n) (1 << n)
+
+/* WORD 0 Data Structure of PRD Table */
+#define FUSB300_EPPRD0_M (1 << 30)
+#define FUSB300_EPPRD0_O (1 << 29)
+/* F: this PRD has finished */
+#define FUSB300_EPPRD0_F (1 << 28)
+#define FUSB300_EPPRD0_I (1 << 27)
+#define FUSB300_EPPRD0_A (1 << 26)
+/* L: last PRD; the hardware returns to the first PRD afterwards */
+#define FUSB300_EPPRD0_L (1 << 25)
+#define FUSB300_EPPRD0_H (1 << 24)
+#define FUSB300_EPPRD0_BTC(n) ((n) & 0xFFFFFF)
+
+/*----------------------------------------------------------------------*/
+#define FUSB300_MAX_NUM_EP 16
+
+#define FUSB300_FIFO_ENTRY_NUM 8
+#define FUSB300_MAX_FIFO_ENTRY 8
+
+#define SS_CTL_MAX_PACKET_SIZE 0x200
+#define SS_BULK_MAX_PACKET_SIZE 0x400
+#define SS_INT_MAX_PACKET_SIZE 0x400
+#define SS_ISO_MAX_PACKET_SIZE 0x400
+
+#define HS_BULK_MAX_PACKET_SIZE 0x200
+#define HS_CTL_MAX_PACKET_SIZE 0x40
+#define HS_INT_MAX_PACKET_SIZE 0x400
+#define HS_ISO_MAX_PACKET_SIZE 0x400
+
+struct fusb300_ep_info {
+ u8 epnum;
+ u8 type;
+ u8 interval;
+ u8 dir_in;
+ u16 maxpacket;
+ u16 addrofs;
+ u16 bw_num;
+};
+
+struct fusb300_request {
+ struct usb_request req;
+ struct list_head queue;
+};
+
+struct fusb300_ep {
+ struct usb_ep ep;
+ struct fusb300 *fusb300;
+
+ struct list_head queue;
+ unsigned stall:1;
+ unsigned wedged:1;
+ unsigned use_dma:1;
+
+ unsigned char epnum;
+ unsigned char type;
+ const struct usb_endpoint_descriptor *desc;
+};
+
+struct fusb300 {
+ spinlock_t lock;
+ void __iomem *reg;
+
+ unsigned long irq_trigger;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+
+ struct fusb300_ep *ep[FUSB300_MAX_NUM_EP];
+
+ struct usb_request *ep0_req; /* for internal request */
+ __le16 ep0_data;
+ u32 ep0_length; /* for internal request */
+ u8 ep0_dir; /* 0/0x80 out/in */
+
+ u8 fifo_entry_num; /* next start fifo entry */
+ u32 addrofs; /* next fifo address offset */
+ u8 reenum; /* set when re-enumerating */
+};
+
+#endif
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 51b19f3027e..084aa080a2d 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -258,7 +258,7 @@ static int pipe_buffer_setting(struct m66592 *m66592,
break;
case M66592_BULK:
/* isochronous pipes may be used as bulk pipes */
- if (info->pipe > M66592_BASE_PIPENUM_BULK)
+ if (info->pipe >= M66592_BASE_PIPENUM_BULK)
bufnum = info->pipe - M66592_BASE_PIPENUM_BULK;
else
bufnum = info->pipe - M66592_BASE_PIPENUM_ISOC;
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index b120dbb64d0..3e4b35e50c2 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -367,7 +367,6 @@ struct pch_udc_dev {
static const char ep0_string[] = "ep0in";
static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
struct pch_udc_dev *pch_udc; /* pointer to device object */
-
static int speed_fs;
module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
@@ -383,6 +382,8 @@ MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
* @dma_mapped: DMA memory mapped for request
* @dma_done: DMA completed for request
* @chain_len: chain length
+ * @buf: Bounce buffer used for alignment adjustment
+ * @dma: DMA address of the bounce buffer
*/
struct pch_udc_request {
struct usb_request req;
@@ -394,6 +395,8 @@ struct pch_udc_request {
dma_mapped:1,
dma_done:1;
unsigned chain_len;
+ void *buf;
+ dma_addr_t dma;
};
static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
@@ -615,7 +618,7 @@ static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
/**
* pch_udc_ep_set_bufsz() - Set the maximum packet size for the endpoint
* @ep: Reference to structure of type pch_udc_ep_regs
- * @buf_size: The buffer size
+ * @buf_size: The buffer word size
*/
static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
u32 buf_size, u32 ep_in)
@@ -635,7 +638,7 @@ static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
/**
* pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
* @ep: Reference to structure of type pch_udc_ep_regs
- * @pkt_size: The packet size
+ * @pkt_size: The packet byte size
*/
static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
{
@@ -920,25 +923,10 @@ static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
*/
static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
{
- unsigned int loopcnt = 0;
- struct pch_udc_dev *dev = ep->dev;
-
if (dir) { /* IN ep */
pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
return;
}
-
- if (pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP)
- return;
- pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_MRXFLUSH);
- /* Wait for RxFIFO Empty */
- loopcnt = 10000;
- while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
- --loopcnt)
- udelay(5);
- if (!loopcnt)
- dev_err(&dev->pdev->dev, "RxFIFO not Empty\n");
- pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_MRXFLUSH);
}
/**
@@ -1220,14 +1208,31 @@ static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
dev = ep->dev;
if (req->dma_mapped) {
- if (ep->in)
- dma_unmap_single(&dev->pdev->dev, req->req.dma,
- req->req.length, DMA_TO_DEVICE);
- else
- dma_unmap_single(&dev->pdev->dev, req->req.dma,
- req->req.length, DMA_FROM_DEVICE);
+ if (req->dma == DMA_ADDR_INVALID) {
+ if (ep->in)
+ dma_unmap_single(&dev->pdev->dev, req->req.dma,
+ req->req.length,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(&dev->pdev->dev, req->req.dma,
+ req->req.length,
+ DMA_FROM_DEVICE);
+ req->req.dma = DMA_ADDR_INVALID;
+ } else {
+ if (ep->in)
+ dma_unmap_single(&dev->pdev->dev, req->dma,
+ req->req.length,
+ DMA_TO_DEVICE);
+ else {
+ dma_unmap_single(&dev->pdev->dev, req->dma,
+ req->req.length,
+ DMA_FROM_DEVICE);
+ memcpy(req->req.buf, req->buf, req->req.length);
+ }
+ kfree(req->buf);
+ req->dma = DMA_ADDR_INVALID;
+ }
req->dma_mapped = 0;
- req->req.dma = DMA_ADDR_INVALID;
}
ep->halted = 1;
spin_unlock(&dev->lock);
@@ -1268,12 +1273,18 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
struct pch_udc_data_dma_desc *td = req->td_data;
unsigned i = req->chain_len;
+ dma_addr_t addr2;
+ dma_addr_t addr = (dma_addr_t)td->next;
+ td->next = 0x00;
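+ /*
+ * Editorial note: each descriptor's link is saved in addr2 before
+ * pci_pool_free() releases it, so the walk can continue safely;
+ * only the first descriptor is kept and freed with the request.
+ */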
for (; i > 1; --i) {
- dma_addr_t addr = (dma_addr_t)td->next;
/* do not free first desc., will be done by free for request */
td = phys_to_virt(addr);
+ addr2 = (dma_addr_t)td->next;
pci_pool_free(dev->data_requests, td, addr);
+ td->next = 0x00;
+ addr = addr2;
}
+ req->chain_len = 1;
}
/**
@@ -1301,23 +1312,23 @@ static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
if (req->chain_len > 1)
pch_udc_free_dma_chain(ep->dev, req);
- for (; ; bytes -= buf_len, ++len) {
- if (ep->in)
- td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
- else
- td->status = PCH_UDC_BS_HST_BSY;
+ if (req->dma == DMA_ADDR_INVALID)
+ td->dataptr = req->req.dma;
+ else
+ td->dataptr = req->dma;
+ td->status = PCH_UDC_BS_HST_BSY;
+ for (; ; bytes -= buf_len, ++len) {
+ td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
if (bytes <= buf_len)
break;
-
last = td;
td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
&dma_addr);
if (!td)
goto nomem;
-
i += buf_len;
- td->dataptr = req->req.dma + i;
+ td->dataptr = req->td_data->dataptr + i;
last->next = dma_addr;
}
@@ -1352,28 +1363,15 @@ static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
{
int retval;
- req->td_data->dataptr = req->req.dma;
- req->td_data->status |= PCH_UDC_DMA_LAST;
/* Allocate and create a DMA chain */
retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
if (retval) {
- pr_err("%s: could not create DMA chain: %d\n",
- __func__, retval);
+ pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
return retval;
}
- if (!ep->in)
- return 0;
- if (req->req.length <= ep->ep.maxpacket)
- req->td_data->status = PCH_UDC_DMA_LAST | PCH_UDC_BS_HST_BSY |
- req->req.length;
- /* if bytes < max packet then tx bytes must
- * be written in packet per buffer mode
- */
- if ((req->req.length < ep->ep.maxpacket) || !ep->num)
+ if (ep->in)
req->td_data->status = (req->td_data->status &
- ~PCH_UDC_RXTX_BYTES) | req->req.length;
- req->td_data->status = (req->td_data->status &
- ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_BSY;
+ ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
return 0;
}
@@ -1529,6 +1527,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
if (!req)
return NULL;
req->req.dma = DMA_ADDR_INVALID;
+ req->dma = DMA_ADDR_INVALID;
INIT_LIST_HEAD(&req->queue);
if (!ep->dev->dma_addr)
return &req->req;
@@ -1613,16 +1612,33 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
/* map the buffer for dma */
if (usbreq->length &&
((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
- if (ep->in)
- usbreq->dma = dma_map_single(&dev->pdev->dev,
- usbreq->buf,
- usbreq->length,
- DMA_TO_DEVICE);
- else
- usbreq->dma = dma_map_single(&dev->pdev->dev,
- usbreq->buf,
- usbreq->length,
- DMA_FROM_DEVICE);
+ if (!((unsigned long)(usbreq->buf) & 0x03)) {
+ if (ep->in)
+ usbreq->dma = dma_map_single(&dev->pdev->dev,
+ usbreq->buf,
+ usbreq->length,
+ DMA_TO_DEVICE);
+ else
+ usbreq->dma = dma_map_single(&dev->pdev->dev,
+ usbreq->buf,
+ usbreq->length,
+ DMA_FROM_DEVICE);
+ } else {
+ req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
+ if (!req->buf)
+ return -ENOMEM;
+ if (ep->in) {
+ memcpy(req->buf, usbreq->buf, usbreq->length);
+ req->dma = dma_map_single(&dev->pdev->dev,
+ req->buf,
+ usbreq->length,
+ DMA_TO_DEVICE);
+ } else
+ req->dma = dma_map_single(&dev->pdev->dev,
+ req->buf,
+ usbreq->length,
+ DMA_FROM_DEVICE);
+ }
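+ /*
+ * Editorial note: buffers that are not 4-byte aligned are bounced
+ * through req->buf so the DMA engine always sees an aligned
+ * address; IN data is copied here before mapping, OUT data is
+ * copied back in complete_req() after the unmap.
+ */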
req->dma_mapped = 1;
}
if (usbreq->length > 0) {
@@ -1920,32 +1936,46 @@ static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
struct pch_udc_request *req;
struct pch_udc_dev *dev = ep->dev;
unsigned int count;
+ struct pch_udc_data_dma_desc *td;
+ dma_addr_t addr;
if (list_empty(&ep->queue))
return;
-
/* next request */
req = list_entry(ep->queue.next, struct pch_udc_request, queue);
- if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
- PCH_UDC_BS_DMA_DONE)
- return;
pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
pch_udc_ep_set_ddptr(ep, 0);
- if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
- PCH_UDC_RTS_SUCC) {
- dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
- "epstatus=0x%08x\n",
- (req->td_data_last->status & PCH_UDC_RXTX_STS),
- (int)(ep->epsts));
- return;
- }
- count = req->td_data_last->status & PCH_UDC_RXTX_BYTES;
+ if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
+ PCH_UDC_BS_DMA_DONE)
+ td = req->td_data_last;
+ else
+ td = req->td_data;
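+ /*
+ * Editorial note: walk the RX descriptor chain until a descriptor
+ * that is both DMA-done and flagged as last is found; its
+ * RXTX_BYTES field holds the received length. Give up if the
+ * chain ends first.
+ */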
+ while (1) {
+ if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
+ dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
+ "epstatus=0x%08x\n",
+ (td->status & PCH_UDC_RXTX_STS),
+ (int)(ep->epsts));
+ return;
+ }
+ if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
+ if (td->status & PCH_UDC_DMA_LAST) {
+ count = td->status & PCH_UDC_RXTX_BYTES;
+ break;
+ }
+ if (td == req->td_data_last) {
+ dev_err(&dev->pdev->dev, "Not complete RX descriptor");
+ return;
+ }
+ addr = (dma_addr_t)td->next;
+ td = phys_to_virt(addr);
+ }
/* on 64k packets the RXBYTES field is zero */
if (!count && (req->req.length == UDC_DMA_MAXPACKET))
count = UDC_DMA_MAXPACKET;
req->td_data->status |= PCH_UDC_DMA_LAST;
- req->td_data_last->status |= PCH_UDC_BS_HST_BSY;
+ td->status |= PCH_UDC_BS_HST_BSY;
req->dma_going = 0;
req->req.actual = count;
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index c2448950a8d..6d8b04061d5 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -902,7 +902,7 @@ static irqreturn_t s3c2410_udc_irq(int dummy, void *_dev)
int pwr_reg;
int ep0csr;
int i;
- u32 idx;
+ u32 idx, idx2;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
@@ -1017,6 +1017,20 @@ static irqreturn_t s3c2410_udc_irq(int dummy, void *_dev)
}
}
+ /* no other status bits are set: an OUT endpoint received data; find it */
+ if (!usb_status && !usbd_status && !pwr_reg && !ep0csr) {
+ for (i = 1; i < S3C2410_ENDPOINTS; i++) {
+ idx2 = udc_read(S3C2410_UDC_INDEX_REG);
+ udc_write(i, S3C2410_UDC_INDEX_REG);
+
+ if (udc_read(S3C2410_UDC_OUT_CSR1_REG) & 0x1)
+ s3c2410_udc_handle_ep(&dev->ep[i]);
+
+ /* restore index */
+ udc_write(idx2, S3C2410_UDC_INDEX_REG);
+ }
+ }
+
dprintk(DEBUG_VERBOSE, "irq: %d s3c2410_udc_done.\n", IRQ_USBD);
/* Restore old index */
@@ -1467,7 +1481,9 @@ static int s3c2410_udc_set_pullup(struct s3c2410_udc *udc, int is_on)
{
dprintk(DEBUG_NORMAL, "%s()\n", __func__);
- if (udc_info && udc_info->udc_command) {
+ if (udc_info && (udc_info->udc_command ||
+ gpio_is_valid(udc_info->pullup_pin))) {
+
if (is_on)
s3c2410_udc_enable(udc);
else {
@@ -1544,6 +1560,32 @@ static const struct usb_gadget_ops s3c2410_ops = {
.vbus_draw = s3c2410_vbus_draw,
};
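+
+/*
+ * Editorial note: boards can control the USB pull-up either through a
+ * custom udc_command() callback or, if that is absent, through a plain
+ * GPIO described by pullup_pin/pullup_pin_inverted in the platform data.
+ */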
+static void s3c2410_udc_command(enum s3c2410_udc_cmd_e cmd)
+{
+ if (!udc_info)
+ return;
+
+ if (udc_info->udc_command) {
+ udc_info->udc_command(cmd);
+ } else if (gpio_is_valid(udc_info->pullup_pin)) {
+ int value;
+
+ switch (cmd) {
+ case S3C2410_UDC_P_ENABLE:
+ value = 1;
+ break;
+ case S3C2410_UDC_P_DISABLE:
+ value = 0;
+ break;
+ default:
+ return;
+ }
+ value ^= udc_info->pullup_pin_inverted;
+
+ gpio_set_value(udc_info->pullup_pin, value);
+ }
+}
+
/*------------------------- gadget driver handling---------------------------*/
/*
* s3c2410_udc_disable
@@ -1565,8 +1607,7 @@ static void s3c2410_udc_disable(struct s3c2410_udc *dev)
udc_write(0x1F, S3C2410_UDC_EP_INT_REG);
/* Good bye, cruel world */
- if (udc_info && udc_info->udc_command)
- udc_info->udc_command(S3C2410_UDC_P_DISABLE);
+ s3c2410_udc_command(S3C2410_UDC_P_DISABLE);
/* Set speed to unknown */
dev->gadget.speed = USB_SPEED_UNKNOWN;
@@ -1627,8 +1668,7 @@ static void s3c2410_udc_enable(struct s3c2410_udc *dev)
udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_EN_REG);
/* time to say "hello, world" */
- if (udc_info && udc_info->udc_command)
- udc_info->udc_command(S3C2410_UDC_P_ENABLE);
+ s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
}
/*
@@ -1903,6 +1943,17 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
udc->vbus = 1;
}
+ if (udc_info && !udc_info->udc_command &&
+ gpio_is_valid(udc_info->pullup_pin)) {
+
+ retval = gpio_request_one(udc_info->pullup_pin,
+ udc_info->pullup_pin_inverted ?
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+ "udc pullup");
+ if (retval)
+ goto err_vbus_irq;
+ }
+
if (s3c2410_udc_debugfs_root) {
udc->regs_info = debugfs_create_file("registers", S_IRUGO,
s3c2410_udc_debugfs_root,
@@ -1915,6 +1966,9 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
return 0;
+err_vbus_irq:
+ if (udc_info && udc_info->vbus_pin > 0)
+ free_irq(gpio_to_irq(udc_info->vbus_pin), udc);
err_gpio_claim:
if (udc_info && udc_info->vbus_pin > 0)
gpio_free(udc_info->vbus_pin);
@@ -1942,6 +1996,10 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
debugfs_remove(udc->regs_info);
+ if (udc_info && !udc_info->udc_command &&
+ gpio_is_valid(udc_info->pullup_pin))
+ gpio_free(udc_info->pullup_pin);
+
if (udc_info && udc_info->vbus_pin > 0) {
irq = gpio_to_irq(udc_info->vbus_pin);
free_irq(irq, udc);
@@ -1973,16 +2031,14 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int s3c2410_udc_suspend(struct platform_device *pdev, pm_message_t message)
{
- if (udc_info && udc_info->udc_command)
- udc_info->udc_command(S3C2410_UDC_P_DISABLE);
+ s3c2410_udc_command(S3C2410_UDC_P_DISABLE);
return 0;
}
static int s3c2410_udc_resume(struct platform_device *pdev)
{
- if (udc_info && udc_info->udc_command)
- udc_info->udc_command(S3C2410_UDC_P_ENABLE);
+ s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
return 0;
}
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 1eda968b564..2ac1d214732 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -241,7 +241,7 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
size -= size % out->maxpacket;
if (dev->port_usb->is_fixed)
- size = max(size, dev->port_usb->fixed_out_len);
+ size = max_t(size_t, size, dev->port_usb->fixed_out_len);
skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
if (skb == NULL) {
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 0e6afa260ed..9483acdf2e9 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -91,17 +91,28 @@ config USB_EHCI_TT_NEWSCHED
If unsure, say Y.
+config USB_EHCI_HCD_PMC_MSP
+ tristate "EHCI support for on-chip PMC MSP71xx USB controller"
+ depends on USB_EHCI_HCD && MSP_HAS_USB
+ default n
+ select USB_EHCI_BIG_ENDIAN_DESC
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ ---help---
+ Enables support for the on-chip USB controller on the PMC_MSP7100
+ family of SoCs.
+ If unsure, say N.
+
config USB_EHCI_BIG_ENDIAN_MMIO
bool
depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || \
ARCH_IXP4XX || XPS_USB_HCD_XILINX || \
- PPC_MPC512x || CPU_CAVIUM_OCTEON)
+ PPC_MPC512x || CPU_CAVIUM_OCTEON || \
+ PMC_MSP)
default y
config USB_EHCI_BIG_ENDIAN_DESC
bool
depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX || \
- PPC_MPC512x)
+ PPC_MPC512x || PMC_MSP)
default y
config XPS_USB_HCD_XILINX
@@ -145,7 +156,7 @@ config USB_EHCI_MSM
bool "Support for MSM on-chip EHCI USB controller"
depends on USB_EHCI_HCD && ARCH_MSM
select USB_EHCI_ROOT_HUB_TT
- select USB_MSM_OTG_72K
+ select USB_MSM_OTG
---help---
Enables support for the USB Host controller present on the
Qualcomm chipsets. Root Hub has inbuilt TT.
@@ -154,6 +165,14 @@ config USB_EHCI_MSM
This driver is not supported on boards like trout which
has an external PHY.
+config USB_EHCI_TEGRA
+ boolean "NVIDIA Tegra HCD support"
+ depends on USB_EHCI_HCD && ARCH_TEGRA
+ select USB_EHCI_ROOT_HUB_TT
+ help
+ This driver enables support for the internal USB Host Controllers
+ found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
+
config USB_EHCI_HCD_PPC_OF
bool "EHCI support for PPC USB controller on OF platform bus"
depends on USB_EHCI_HCD && PPC_OF
@@ -162,6 +181,13 @@ config USB_EHCI_HCD_PPC_OF
Enables support for the USB controller present on the PowerPC
OpenFirmware platform bus.
+config USB_EHCI_SH
+ bool "EHCI support for SuperH USB controller"
+ depends on USB_EHCI_HCD && SUPERH
+ ---help---
+ Enables support for the on-chip EHCI controller on the SuperH.
+ If you use the PCI EHCI controller, this option is not necessary.
+
config USB_W90X900_EHCI
bool "W90X900(W90P910) EHCI support"
depends on USB_EHCI_HCD && ARCH_W90X900
@@ -315,6 +341,13 @@ config USB_OHCI_HCD_SSB
If unsure, say N.
+config USB_OHCI_SH
+ bool "OHCI support for SuperH USB controller"
+ depends on USB_OHCI_HCD && SUPERH
+ ---help---
+ Enables support for the on-chip OHCI controller on the SuperH.
+ If you use the PCI OHCI controller, this option is not necessary.
+
config USB_CNS3XXX_OHCI
bool "Cavium CNS3XXX OHCI Module"
depends on USB_OHCI_HCD && ARCH_CNS3XXX
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index d6a69d514a8..b2ed55cb811 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -115,7 +115,7 @@ static const struct hc_driver ehci_atmel_hc_driver = {
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
-static int __init ehci_atmel_drv_probe(struct platform_device *pdev)
+static int __devinit ehci_atmel_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
const struct hc_driver *driver = &ehci_atmel_hc_driver;
@@ -207,7 +207,7 @@ fail_create_hcd:
return retval;
}
-static int __exit ehci_atmel_drv_remove(struct platform_device *pdev)
+static int __devexit ehci_atmel_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
@@ -227,7 +227,7 @@ static int __exit ehci_atmel_drv_remove(struct platform_device *pdev)
static struct platform_driver ehci_atmel_driver = {
.probe = ehci_atmel_drv_probe,
- .remove = __exit_p(ehci_atmel_drv_remove),
+ .remove = __devexit_p(ehci_atmel_drv_remove),
.shutdown = usb_hcd_platform_shutdown,
.driver.name = "atmel-ehci",
};
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 3be238a24cc..693c29b3052 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -28,11 +28,9 @@
dev_warn (ehci_to_hcd(ehci)->self.controller , fmt , ## args )
#ifdef VERBOSE_DEBUG
-# define vdbg dbg
# define ehci_vdbg ehci_dbg
#else
-# define vdbg(fmt,args...) do { } while (0)
-# define ehci_vdbg(ehci, fmt, args...) do { } while (0)
+ static inline void ehci_vdbg(struct ehci_hcd *ehci, ...) {}
#endif
#ifdef DEBUG
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 74dcf49bd01..d30c4e08c13 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -114,13 +114,11 @@ MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us\n");
#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
-/* for ASPM quirk of ISOC on AMD SB800 */
-static struct pci_dev *amd_nb_dev;
-
/*-------------------------------------------------------------------------*/
#include "ehci.h"
#include "ehci-dbg.c"
+#include "pci-quirks.h"
/*-------------------------------------------------------------------------*/
@@ -532,10 +530,8 @@ static void ehci_stop (struct usb_hcd *hcd)
spin_unlock_irq (&ehci->lock);
ehci_mem_cleanup (ehci);
- if (amd_nb_dev) {
- pci_dev_put(amd_nb_dev);
- amd_nb_dev = NULL;
- }
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_dev_put();
#ifdef EHCI_STATS
ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
@@ -679,7 +675,12 @@ static int ehci_run (struct usb_hcd *hcd)
hcd->uses_new_polling = 1;
/* EHCI spec section 4.1 */
- if ((retval = ehci_reset(ehci)) != 0) {
+ /*
+ * TDI driver does the ehci_reset in their reset callback.
+ * Don't reset here, because configuration settings will
+ * vanish.
+ */
+ if (!ehci_is_TDI(ehci) && (retval = ehci_reset(ehci)) != 0) {
ehci_mem_cleanup(ehci);
return retval;
}
@@ -1179,7 +1180,7 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_mxc_driver
#endif
-#ifdef CONFIG_CPU_SUBTYPE_SH7786
+#ifdef CONFIG_USB_EHCI_SH
#include "ehci-sh.c"
#define PLATFORM_DRIVER ehci_hcd_sh_driver
#endif
@@ -1254,6 +1255,16 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_msm_driver
#endif
+#ifdef CONFIG_USB_EHCI_HCD_PMC_MSP
+#include "ehci-pmcmsp.c"
+#define PLATFORM_DRIVER ehci_hcd_msp_driver
+#endif
+
+#ifdef CONFIG_USB_EHCI_TEGRA
+#include "ehci-tegra.c"
+#define PLATFORM_DRIVER tegra_ehci_driver
+#endif
+
#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
!defined(XILINX_OF_PLATFORM_DRIVER)
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 72ae77c7b7c..d05ea03cfb4 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -559,14 +559,15 @@ static ssize_t store_companion(struct device *dev,
}
static DEVICE_ATTR(companion, 0644, show_companion, store_companion);
-static inline void create_companion_file(struct ehci_hcd *ehci)
+static inline int create_companion_file(struct ehci_hcd *ehci)
{
- int i;
+ int i = 0;
/* with integrated TT there is no companion! */
if (!ehci_is_TDI(ehci))
i = device_create_file(ehci_to_hcd(ehci)->self.controller,
&dev_attr_companion);
+ return i;
}
static inline void remove_companion_file(struct ehci_hcd *ehci)
@@ -716,8 +717,8 @@ ehci_hub_descriptor (
desc->bDescLength = 7 + 2 * temp;
/* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
- memset (&desc->bitmap [0], 0, temp);
- memset (&desc->bitmap [temp], 0xff, temp);
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
temp = 0x0008; /* per-port overcurrent reporting */
if (HCS_PPC (ehci->hcs_params))
diff --git a/drivers/usb/host/ehci-lpm.c b/drivers/usb/host/ehci-lpm.c
index b4d4d63c13e..2111627a19d 100644
--- a/drivers/usb/host/ehci-lpm.c
+++ b/drivers/usb/host/ehci-lpm.c
@@ -17,7 +17,8 @@
*/
/* this file is part of ehci-hcd.c */
-static int ehci_lpm_set_da(struct ehci_hcd *ehci, int dev_addr, int port_num)
+static int __maybe_unused ehci_lpm_set_da(struct ehci_hcd *ehci,
+ int dev_addr, int port_num)
{
u32 __iomem portsc;
@@ -37,7 +38,7 @@ static int ehci_lpm_set_da(struct ehci_hcd *ehci, int dev_addr, int port_num)
* this function is used to check if the device support LPM
* if yes, mark the PORTSC register with PORT_LPM bit
*/
-static int ehci_lpm_check(struct ehci_hcd *ehci, int port)
+static int __maybe_unused ehci_lpm_check(struct ehci_hcd *ehci, int port)
{
u32 __iomem *portsc ;
u32 val32;
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index 413f4deca53..9ce1b0bc186 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -1,6 +1,6 @@
/* ehci-msm.c - HSUSB Host Controller Driver Implementation
*
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
*
* Partly derived from ehci-fsl.c and ehci-hcd.c
* Copyright (c) 2000-2004 by David Brownell
@@ -34,92 +34,6 @@
static struct otg_transceiver *otg;
-/*
- * ehci_run defined in drivers/usb/host/ehci-hcd.c reset the controller and
- * the configuration settings in ehci_msm_reset vanish after controller is
- * reset. Resetting the controler in ehci_run seems to be un-necessary
- * provided HCD reset the controller before calling ehci_run. Most of the HCD
- * do but some are not. So this function is same as ehci_run but we don't
- * reset the controller here.
- */
-static int ehci_msm_run(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- u32 temp;
- u32 hcc_params;
-
- hcd->uses_new_polling = 1;
-
- ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
- ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
-
- /*
- * hcc_params controls whether ehci->regs->segment must (!!!)
- * be used; it constrains QH/ITD/SITD and QTD locations.
- * pci_pool consistent memory always uses segment zero.
- * streaming mappings for I/O buffers, like pci_map_single(),
- * can return segments above 4GB, if the device allows.
- *
- * NOTE: the dma mask is visible through dma_supported(), so
- * drivers can pass this info along ... like NETIF_F_HIGHDMA,
- * Scsi_Host.highmem_io, and so forth. It's readonly to all
- * host side drivers though.
- */
- hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
- if (HCC_64BIT_ADDR(hcc_params))
- ehci_writel(ehci, 0, &ehci->regs->segment);
-
- /*
- * Philips, Intel, and maybe others need CMD_RUN before the
- * root hub will detect new devices (why?); NEC doesn't
- */
- ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
- ehci->command |= CMD_RUN;
- ehci_writel(ehci, ehci->command, &ehci->regs->command);
- dbg_cmd(ehci, "init", ehci->command);
-
- /*
- * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
- * are explicitly handed to companion controller(s), so no TT is
- * involved with the root hub. (Except where one is integrated,
- * and there's no companion controller unless maybe for USB OTG.)
- *
- * Turning on the CF flag will transfer ownership of all ports
- * from the companions to the EHCI controller. If any of the
- * companions are in the middle of a port reset at the time, it
- * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
- * guarantees that no resets are in progress. After we set CF,
- * a short delay lets the hardware catch up; new resets shouldn't
- * be started before the port switching actions could complete.
- */
- down_write(&ehci_cf_port_reset_rwsem);
- hcd->state = HC_STATE_RUNNING;
- ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
- ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
- usleep_range(5000, 5500);
- up_write(&ehci_cf_port_reset_rwsem);
- ehci->last_periodic_enable = ktime_get_real();
-
- temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
- ehci_info(ehci,
- "USB %x.%x started, EHCI %x.%02x%s\n",
- ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
- temp >> 8, temp & 0xff,
- ignore_oc ? ", overcurrent ignored" : "");
-
- ehci_writel(ehci, INTR_MASK,
- &ehci->regs->intr_enable); /* Turn On Interrupts */
-
- /* GRR this is run-once init(), being done every time the HC starts.
- * So long as they're part of class devices, we can't do it init()
- * since the class device isn't created that early.
- */
- create_debug_files(ehci);
- create_companion_file(ehci);
-
- return 0;
-}
-
static int ehci_msm_reset(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
@@ -128,6 +42,8 @@ static int ehci_msm_reset(struct usb_hcd *hcd)
ehci->caps = USB_CAPLENGTH;
ehci->regs = USB_CAPLENGTH +
HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ dbg_hcs_params(ehci, "reset");
+ dbg_hcc_params(ehci, "reset");
/* cache the data to minimize the chip reads*/
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
@@ -135,6 +51,10 @@ static int ehci_msm_reset(struct usb_hcd *hcd)
hcd->has_tt = 1;
ehci->sbrn = HCD_USB2;
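+ /* editorial note: ensure the controller is halted before ehci_init() */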
+ retval = ehci_halt(ehci);
+ if (retval)
+ return retval;
+
/* data structure init */
retval = ehci_init(hcd);
if (retval)
@@ -167,7 +87,7 @@ static struct hc_driver msm_hc_driver = {
.flags = HCD_USB2 | HCD_MEMORY,
.reset = ehci_msm_reset,
- .start = ehci_msm_run,
+ .start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index c8e360d7d97..25c8c10bb68 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -203,11 +203,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
mdelay(10);
}
- /* setup specific usb hw */
- ret = mxc_initialize_usb_hw(pdev->id, pdata->flags);
- if (ret < 0)
- goto err_init;
-
ehci = hcd_to_ehci(hcd);
/* EHCI registers start at offset 0x100 */
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index f784ceb862a..7e41a95c5ce 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -4,9 +4,10 @@
* Bus Glue for the EHCI controllers in OMAP3/4
* Tested on several OMAP3 boards, and OMAP4 Pandaboard
*
- * Copyright (C) 2007-2010 Texas Instruments, Inc.
+ * Copyright (C) 2007-2011 Texas Instruments, Inc.
* Author: Vikram Pandita <vikram.pandita@ti.com>
* Author: Anand Gadiyar <gadiyar@ti.com>
+ * Author: Keshava Munegowda <keshava_mgowda@ti.com>
*
* Copyright (C) 2009 Nokia Corporation
* Contact: Felipe Balbi <felipe.balbi@nokia.com>
@@ -27,116 +28,19 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
- * TODO (last updated Nov 21, 2010):
+ * TODO (last updated Feb 27, 2011):
* - add kernel-doc
* - enable AUTOIDLE
* - add suspend/resume
- * - move workarounds to board-files
- * - factor out code common to OHCI
* - add HSIC and TLL support
* - convert to use hwmod and runtime PM
*/
#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/usb/ulpi.h>
#include <plat/usb.h>
-/*
- * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES
- * Use ehci_omap_readl()/ehci_omap_writel() functions
- */
-
-/* TLL Register Set */
-#define OMAP_USBTLL_REVISION (0x00)
-#define OMAP_USBTLL_SYSCONFIG (0x10)
-#define OMAP_USBTLL_SYSCONFIG_CACTIVITY (1 << 8)
-#define OMAP_USBTLL_SYSCONFIG_SIDLEMODE (1 << 3)
-#define OMAP_USBTLL_SYSCONFIG_ENAWAKEUP (1 << 2)
-#define OMAP_USBTLL_SYSCONFIG_SOFTRESET (1 << 1)
-#define OMAP_USBTLL_SYSCONFIG_AUTOIDLE (1 << 0)
-
-#define OMAP_USBTLL_SYSSTATUS (0x14)
-#define OMAP_USBTLL_SYSSTATUS_RESETDONE (1 << 0)
-
-#define OMAP_USBTLL_IRQSTATUS (0x18)
-#define OMAP_USBTLL_IRQENABLE (0x1C)
-
-#define OMAP_TLL_SHARED_CONF (0x30)
-#define OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN (1 << 6)
-#define OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN (1 << 5)
-#define OMAP_TLL_SHARED_CONF_USB_DIVRATION (1 << 2)
-#define OMAP_TLL_SHARED_CONF_FCLK_REQ (1 << 1)
-#define OMAP_TLL_SHARED_CONF_FCLK_IS_ON (1 << 0)
-
-#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num)
-#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11)
-#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10)
-#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9)
-#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8)
-#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0)
-
-#define OMAP_TLL_ULPI_FUNCTION_CTRL(num) (0x804 + 0x100 * num)
-#define OMAP_TLL_ULPI_INTERFACE_CTRL(num) (0x807 + 0x100 * num)
-#define OMAP_TLL_ULPI_OTG_CTRL(num) (0x80A + 0x100 * num)
-#define OMAP_TLL_ULPI_INT_EN_RISE(num) (0x80D + 0x100 * num)
-#define OMAP_TLL_ULPI_INT_EN_FALL(num) (0x810 + 0x100 * num)
-#define OMAP_TLL_ULPI_INT_STATUS(num) (0x813 + 0x100 * num)
-#define OMAP_TLL_ULPI_INT_LATCH(num) (0x814 + 0x100 * num)
-#define OMAP_TLL_ULPI_DEBUG(num) (0x815 + 0x100 * num)
-#define OMAP_TLL_ULPI_SCRATCH_REGISTER(num) (0x816 + 0x100 * num)
-
-#define OMAP_TLL_CHANNEL_COUNT 3
-#define OMAP_TLL_CHANNEL_1_EN_MASK (1 << 0)
-#define OMAP_TLL_CHANNEL_2_EN_MASK (1 << 1)
-#define OMAP_TLL_CHANNEL_3_EN_MASK (1 << 2)
-
-/* UHH Register Set */
-#define OMAP_UHH_REVISION (0x00)
-#define OMAP_UHH_SYSCONFIG (0x10)
-#define OMAP_UHH_SYSCONFIG_MIDLEMODE (1 << 12)
-#define OMAP_UHH_SYSCONFIG_CACTIVITY (1 << 8)
-#define OMAP_UHH_SYSCONFIG_SIDLEMODE (1 << 3)
-#define OMAP_UHH_SYSCONFIG_ENAWAKEUP (1 << 2)
-#define OMAP_UHH_SYSCONFIG_SOFTRESET (1 << 1)
-#define OMAP_UHH_SYSCONFIG_AUTOIDLE (1 << 0)
-
-#define OMAP_UHH_SYSSTATUS (0x14)
-#define OMAP_UHH_HOSTCONFIG (0x40)
-#define OMAP_UHH_HOSTCONFIG_ULPI_BYPASS (1 << 0)
-#define OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS (1 << 0)
-#define OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS (1 << 11)
-#define OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS (1 << 12)
-#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN (1 << 2)
-#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN (1 << 3)
-#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN (1 << 4)
-#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN (1 << 5)
-#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS (1 << 8)
-#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9)
-#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10)
-
-/* OMAP4-specific defines */
-#define OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR (3 << 2)
-#define OMAP4_UHH_SYSCONFIG_NOIDLE (1 << 2)
-
-#define OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR (3 << 4)
-#define OMAP4_UHH_SYSCONFIG_NOSTDBY (1 << 4)
-#define OMAP4_UHH_SYSCONFIG_SOFTRESET (1 << 0)
-
-#define OMAP4_P1_MODE_CLEAR (3 << 16)
-#define OMAP4_P1_MODE_TLL (1 << 16)
-#define OMAP4_P1_MODE_HSIC (3 << 16)
-#define OMAP4_P2_MODE_CLEAR (3 << 18)
-#define OMAP4_P2_MODE_TLL (1 << 18)
-#define OMAP4_P2_MODE_HSIC (3 << 18)
-
-#define OMAP_REV2_TLL_CHANNEL_COUNT 2
-
-#define OMAP_UHH_DEBUG_CSR (0x44)
-
/* EHCI Register Set */
#define EHCI_INSNREG04 (0xA0)
#define EHCI_INSNREG04_DISABLE_UNSUSPEND (1 << 5)
@@ -148,137 +52,24 @@
#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
-/* Values of UHH_REVISION - Note: these are not given in the TRM */
-#define OMAP_EHCI_REV1 0x00000010 /* OMAP3 */
-#define OMAP_EHCI_REV2 0x50700100 /* OMAP4 */
-
-#define is_omap_ehci_rev1(x) (x->omap_ehci_rev == OMAP_EHCI_REV1)
-#define is_omap_ehci_rev2(x) (x->omap_ehci_rev == OMAP_EHCI_REV2)
+/*-------------------------------------------------------------------------*/
-#define is_ehci_phy_mode(x) (x == EHCI_HCD_OMAP_MODE_PHY)
-#define is_ehci_tll_mode(x) (x == EHCI_HCD_OMAP_MODE_TLL)
-#define is_ehci_hsic_mode(x) (x == EHCI_HCD_OMAP_MODE_HSIC)
+static const struct hc_driver ehci_omap_hc_driver;
-/*-------------------------------------------------------------------------*/
-static inline void ehci_omap_writel(void __iomem *base, u32 reg, u32 val)
+static inline void ehci_write(void __iomem *base, u32 reg, u32 val)
{
__raw_writel(val, base + reg);
}
-static inline u32 ehci_omap_readl(void __iomem *base, u32 reg)
+static inline u32 ehci_read(void __iomem *base, u32 reg)
{
return __raw_readl(base + reg);
}
-static inline void ehci_omap_writeb(void __iomem *base, u8 reg, u8 val)
-{
- __raw_writeb(val, base + reg);
-}
-
-static inline u8 ehci_omap_readb(void __iomem *base, u8 reg)
-{
- return __raw_readb(base + reg);
-}
-
-/*-------------------------------------------------------------------------*/
-
-struct ehci_hcd_omap {
- struct ehci_hcd *ehci;
- struct device *dev;
-
- struct clk *usbhost_ick;
- struct clk *usbhost_hs_fck;
- struct clk *usbhost_fs_fck;
- struct clk *usbtll_fck;
- struct clk *usbtll_ick;
- struct clk *xclk60mhsp1_ck;
- struct clk *xclk60mhsp2_ck;
- struct clk *utmi_p1_fck;
- struct clk *utmi_p2_fck;
-
- /* FIXME the following two workarounds are
- * board specific not silicon-specific so these
- * should be moved to board-file instead.
- *
- * Maybe someone from TI will know better which
- * board is affected and needs the workarounds
- * to be applied
- */
-
- /* gpio for resetting phy */
- int reset_gpio_port[OMAP3_HS_USB_PORTS];
-
- /* phy reset workaround */
- int phy_reset;
-
- /* IP revision */
- u32 omap_ehci_rev;
-
- /* desired phy_mode: TLL, PHY */
- enum ehci_hcd_omap_mode port_mode[OMAP3_HS_USB_PORTS];
-
- void __iomem *uhh_base;
- void __iomem *tll_base;
- void __iomem *ehci_base;
-
- /* Regulators for USB PHYs.
- * Each PHY can have a separate regulator.
- */
- struct regulator *regulator[OMAP3_HS_USB_PORTS];
-};
-
-/*-------------------------------------------------------------------------*/
-
-static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask,
- u8 tll_channel_count)
-{
- unsigned reg;
- int i;
-
- /* Program the 3 TLL channels upfront */
- for (i = 0; i < tll_channel_count; i++) {
- reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
-
- /* Disable AutoIdle, BitStuffing and use SDR Mode */
- reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
- | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
- | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
- ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
- }
-
- /* Program Common TLL register */
- reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF);
- reg |= (OMAP_TLL_SHARED_CONF_FCLK_IS_ON
- | OMAP_TLL_SHARED_CONF_USB_DIVRATION
- | OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN);
- reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN;
-
- ehci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);
-
- /* Enable channels now */
- for (i = 0; i < tll_channel_count; i++) {
- reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
-
- /* Enable only the reg that is needed */
- if (!(tll_channel_mask & 1<<i))
- continue;
-
- reg |= OMAP_TLL_CHANNEL_CONF_CHANEN;
- ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
-
- ehci_omap_writeb(omap->tll_base,
- OMAP_TLL_ULPI_SCRATCH_REGISTER(i), 0xbe);
- dev_dbg(omap->dev, "ULPI_SCRATCH_REG[ch=%d]= 0x%02x\n",
- i+1, ehci_omap_readb(omap->tll_base,
- OMAP_TLL_ULPI_SCRATCH_REGISTER(i)));
- }
-}
-
-/*-------------------------------------------------------------------------*/
-
-static void omap_ehci_soft_phy_reset(struct ehci_hcd_omap *omap, u8 port)
+static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
{
+ struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
unsigned reg = 0;
@@ -292,266 +83,87 @@ static void omap_ehci_soft_phy_reset(struct ehci_hcd_omap *omap, u8 port)
/* start ULPI access*/
| (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT);
- ehci_omap_writel(omap->ehci_base, EHCI_INSNREG05_ULPI, reg);
+ ehci_write(hcd->regs, EHCI_INSNREG05_ULPI, reg);
/* Wait for ULPI access completion */
- while ((ehci_omap_readl(omap->ehci_base, EHCI_INSNREG05_ULPI)
+ while ((ehci_read(hcd->regs, EHCI_INSNREG05_ULPI)
& (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT))) {
cpu_relax();
if (time_after(jiffies, timeout)) {
- dev_dbg(omap->dev, "phy reset operation timed out\n");
+ dev_dbg(&pdev->dev, "phy reset operation timed out\n");
break;
}
}
}
-/* omap_start_ehc
- * - Start the TI USBHOST controller
- */
-static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
- u8 tll_ch_mask = 0;
- unsigned reg = 0;
- int ret = 0;
-
- dev_dbg(omap->dev, "starting TI EHCI USB Controller\n");
- /* Enable Clocks for USBHOST */
- omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick");
- if (IS_ERR(omap->usbhost_ick)) {
- ret = PTR_ERR(omap->usbhost_ick);
- goto err_host_ick;
- }
- clk_enable(omap->usbhost_ick);
-
- omap->usbhost_hs_fck = clk_get(omap->dev, "hs_fck");
- if (IS_ERR(omap->usbhost_hs_fck)) {
- ret = PTR_ERR(omap->usbhost_hs_fck);
- goto err_host_120m_fck;
- }
- clk_enable(omap->usbhost_hs_fck);
+/* configure so an HC device and id are always provided */
+/* always called with process context; sleeping is OK */
- omap->usbhost_fs_fck = clk_get(omap->dev, "fs_fck");
- if (IS_ERR(omap->usbhost_fs_fck)) {
- ret = PTR_ERR(omap->usbhost_fs_fck);
- goto err_host_48m_fck;
- }
- clk_enable(omap->usbhost_fs_fck);
-
- if (omap->phy_reset) {
- /* Refer: ISSUE1 */
- if (gpio_is_valid(omap->reset_gpio_port[0])) {
- gpio_request(omap->reset_gpio_port[0],
- "USB1 PHY reset");
- gpio_direction_output(omap->reset_gpio_port[0], 0);
- }
+/**
+ * ehci_hcd_omap_probe - initialize TI-based HCDs
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int ehci_hcd_omap_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ehci_hcd_omap_platform_data *pdata = dev->platform_data;
+ struct resource *res;
+ struct usb_hcd *hcd;
+ void __iomem *regs;
+ struct ehci_hcd *omap_ehci;
+ int ret = -ENODEV;
+ int irq;
- if (gpio_is_valid(omap->reset_gpio_port[1])) {
- gpio_request(omap->reset_gpio_port[1],
- "USB2 PHY reset");
- gpio_direction_output(omap->reset_gpio_port[1], 0);
- }
+ if (usb_disabled())
+ return -ENODEV;
- /* Hold the PHY in RESET for enough time till DIR is high */
- udelay(10);
+ if (!dev->parent) {
+ dev_err(dev, "Missing parent device\n");
+ return -ENODEV;
}
- /* Configure TLL for 60Mhz clk for ULPI */
- omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck");
- if (IS_ERR(omap->usbtll_fck)) {
- ret = PTR_ERR(omap->usbtll_fck);
- goto err_tll_fck;
+ irq = platform_get_irq_byname(pdev, "ehci-irq");
+ if (irq < 0) {
+ dev_err(dev, "EHCI irq failed\n");
+ return -ENODEV;
}
- clk_enable(omap->usbtll_fck);
- omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick");
- if (IS_ERR(omap->usbtll_ick)) {
- ret = PTR_ERR(omap->usbtll_ick);
- goto err_tll_ick;
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "ehci");
+ if (!res) {
+ dev_err(dev, "UHH EHCI get resource failed\n");
+ return -ENODEV;
}
- clk_enable(omap->usbtll_ick);
-
- omap->omap_ehci_rev = ehci_omap_readl(omap->uhh_base,
- OMAP_UHH_REVISION);
- dev_dbg(omap->dev, "OMAP UHH_REVISION 0x%x\n",
- omap->omap_ehci_rev);
- /*
- * Enable per-port clocks as needed (newer controllers only).
- * - External ULPI clock for PHY mode
- * - Internal clocks for TLL and HSIC modes (TODO)
- */
- if (is_omap_ehci_rev2(omap)) {
- switch (omap->port_mode[0]) {
- case EHCI_HCD_OMAP_MODE_PHY:
- omap->xclk60mhsp1_ck = clk_get(omap->dev,
- "xclk60mhsp1_ck");
- if (IS_ERR(omap->xclk60mhsp1_ck)) {
- ret = PTR_ERR(omap->xclk60mhsp1_ck);
- dev_err(omap->dev,
- "Unable to get Port1 ULPI clock\n");
- }
-
- omap->utmi_p1_fck = clk_get(omap->dev,
- "utmi_p1_gfclk");
- if (IS_ERR(omap->utmi_p1_fck)) {
- ret = PTR_ERR(omap->utmi_p1_fck);
- dev_err(omap->dev,
- "Unable to get utmi_p1_fck\n");
- }
-
- ret = clk_set_parent(omap->utmi_p1_fck,
- omap->xclk60mhsp1_ck);
- if (ret != 0) {
- dev_err(omap->dev,
- "Unable to set P1 f-clock\n");
- }
- break;
- case EHCI_HCD_OMAP_MODE_TLL:
- /* TODO */
- default:
- break;
- }
- switch (omap->port_mode[1]) {
- case EHCI_HCD_OMAP_MODE_PHY:
- omap->xclk60mhsp2_ck = clk_get(omap->dev,
- "xclk60mhsp2_ck");
- if (IS_ERR(omap->xclk60mhsp2_ck)) {
- ret = PTR_ERR(omap->xclk60mhsp2_ck);
- dev_err(omap->dev,
- "Unable to get Port2 ULPI clock\n");
- }
-
- omap->utmi_p2_fck = clk_get(omap->dev,
- "utmi_p2_gfclk");
- if (IS_ERR(omap->utmi_p2_fck)) {
- ret = PTR_ERR(omap->utmi_p2_fck);
- dev_err(omap->dev,
- "Unable to get utmi_p2_fck\n");
- }
-
- ret = clk_set_parent(omap->utmi_p2_fck,
- omap->xclk60mhsp2_ck);
- if (ret != 0) {
- dev_err(omap->dev,
- "Unable to set P2 f-clock\n");
- }
- break;
- case EHCI_HCD_OMAP_MODE_TLL:
- /* TODO */
- default:
- break;
- }
+ regs = ioremap(res->start, resource_size(res));
+ if (!regs) {
+ dev_err(dev, "UHH EHCI ioremap failed\n");
+ return -ENOMEM;
}
-
- /* perform TLL soft reset, and wait until reset is complete */
- ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_SOFTRESET);
-
- /* Wait for TLL reset to complete */
- while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
- cpu_relax();
-
- if (time_after(jiffies, timeout)) {
- dev_dbg(omap->dev, "operation timed out\n");
- ret = -EINVAL;
- goto err_sys_status;
- }
+ hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
+ dev_name(dev));
+ if (!hcd) {
+ dev_err(dev, "failed to create hcd with err %d\n", ret);
+ ret = -ENOMEM;
+ goto err_io;
}
- dev_dbg(omap->dev, "TLL RESET DONE\n");
-
- /* (1<<3) = no idle mode only for initial debugging */
- ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
- OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
- OMAP_USBTLL_SYSCONFIG_CACTIVITY);
-
-
- /* Put UHH in NoIdle/NoStandby mode */
- reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
- if (is_omap_ehci_rev1(omap)) {
- reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
- | OMAP_UHH_SYSCONFIG_SIDLEMODE
- | OMAP_UHH_SYSCONFIG_CACTIVITY
- | OMAP_UHH_SYSCONFIG_MIDLEMODE);
- reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
-
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = regs;
- } else if (is_omap_ehci_rev2(omap)) {
- reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
- reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
- reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
- reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
- }
- ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
-
- reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
-
- /* setup ULPI bypass and burst configurations */
- reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
- | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
- | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
- reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
-
- if (is_omap_ehci_rev1(omap)) {
- if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN)
- reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
- if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN)
- reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
- if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN)
- reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
-
- /* Bypass the TLL module for PHY mode operation */
- if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
- dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
- if (is_ehci_phy_mode(omap->port_mode[0]) ||
- is_ehci_phy_mode(omap->port_mode[1]) ||
- is_ehci_phy_mode(omap->port_mode[2]))
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
- } else {
- dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
- if (is_ehci_phy_mode(omap->port_mode[0]))
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
- else if (is_ehci_tll_mode(omap->port_mode[0]))
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
-
- if (is_ehci_phy_mode(omap->port_mode[1]))
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
- else if (is_ehci_tll_mode(omap->port_mode[1]))
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
-
- if (is_ehci_phy_mode(omap->port_mode[2]))
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
- else if (is_ehci_tll_mode(omap->port_mode[2]))
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
- }
- } else if (is_omap_ehci_rev2(omap)) {
- /* Clear port mode fields for PHY mode*/
- reg &= ~OMAP4_P1_MODE_CLEAR;
- reg &= ~OMAP4_P2_MODE_CLEAR;
-
- if (is_ehci_tll_mode(omap->port_mode[0]))
- reg |= OMAP4_P1_MODE_TLL;
- else if (is_ehci_hsic_mode(omap->port_mode[0]))
- reg |= OMAP4_P1_MODE_HSIC;
-
- if (is_ehci_tll_mode(omap->port_mode[1]))
- reg |= OMAP4_P2_MODE_TLL;
- else if (is_ehci_hsic_mode(omap->port_mode[1]))
- reg |= OMAP4_P2_MODE_HSIC;
+ ret = omap_usbhs_enable(dev);
+ if (ret) {
+ dev_err(dev, "failed to start usbhs with err %d\n", ret);
+ goto err_enable;
}
- ehci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
- dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
-
-
/*
* An undocumented "feature" in the OMAP3 EHCI controller,
* causes suspended ports to be taken out of suspend when
@@ -561,363 +173,50 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
* to suspend. Writing 1 to this undocumented register bit
* disables this feature and restores normal behavior.
*/
- ehci_omap_writel(omap->ehci_base, EHCI_INSNREG04,
+ ehci_write(regs, EHCI_INSNREG04,
EHCI_INSNREG04_DISABLE_UNSUSPEND);
- if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) ||
- (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) ||
- (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)) {
-
- if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
- tll_ch_mask |= OMAP_TLL_CHANNEL_1_EN_MASK;
- if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
- tll_ch_mask |= OMAP_TLL_CHANNEL_2_EN_MASK;
- if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)
- tll_ch_mask |= OMAP_TLL_CHANNEL_3_EN_MASK;
-
- /* Enable UTMI mode for required TLL channels */
- omap_usb_utmi_init(omap, tll_ch_mask, OMAP_TLL_CHANNEL_COUNT);
- }
-
- if (omap->phy_reset) {
- /* Refer ISSUE1:
- * Hold the PHY in RESET for enough time till
- * PHY is settled and ready
- */
- udelay(10);
-
- if (gpio_is_valid(omap->reset_gpio_port[0]))
- gpio_set_value(omap->reset_gpio_port[0], 1);
-
- if (gpio_is_valid(omap->reset_gpio_port[1]))
- gpio_set_value(omap->reset_gpio_port[1], 1);
- }
-
/* Soft reset the PHY using PHY reset command over ULPI */
- if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY)
- omap_ehci_soft_phy_reset(omap, 0);
- if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY)
- omap_ehci_soft_phy_reset(omap, 1);
-
- return 0;
-
-err_sys_status:
- clk_disable(omap->utmi_p2_fck);
- clk_put(omap->utmi_p2_fck);
- clk_disable(omap->xclk60mhsp2_ck);
- clk_put(omap->xclk60mhsp2_ck);
- clk_disable(omap->utmi_p1_fck);
- clk_put(omap->utmi_p1_fck);
- clk_disable(omap->xclk60mhsp1_ck);
- clk_put(omap->xclk60mhsp1_ck);
- clk_disable(omap->usbtll_ick);
- clk_put(omap->usbtll_ick);
-
-err_tll_ick:
- clk_disable(omap->usbtll_fck);
- clk_put(omap->usbtll_fck);
-
-err_tll_fck:
- clk_disable(omap->usbhost_fs_fck);
- clk_put(omap->usbhost_fs_fck);
-
- if (omap->phy_reset) {
- if (gpio_is_valid(omap->reset_gpio_port[0]))
- gpio_free(omap->reset_gpio_port[0]);
-
- if (gpio_is_valid(omap->reset_gpio_port[1]))
- gpio_free(omap->reset_gpio_port[1]);
- }
-
-err_host_48m_fck:
- clk_disable(omap->usbhost_hs_fck);
- clk_put(omap->usbhost_hs_fck);
+ if (pdata->port_mode[0] == OMAP_EHCI_PORT_MODE_PHY)
+ omap_ehci_soft_phy_reset(pdev, 0);
+ if (pdata->port_mode[1] == OMAP_EHCI_PORT_MODE_PHY)
+ omap_ehci_soft_phy_reset(pdev, 1);
-err_host_120m_fck:
- clk_disable(omap->usbhost_ick);
- clk_put(omap->usbhost_ick);
-
-err_host_ick:
- return ret;
-}
-
-static void omap_stop_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(100);
-
- dev_dbg(omap->dev, "stopping TI EHCI USB Controller\n");
-
- /* Reset OMAP modules for insmod/rmmod to work */
- ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG,
- is_omap_ehci_rev2(omap) ?
- OMAP4_UHH_SYSCONFIG_SOFTRESET :
- OMAP_UHH_SYSCONFIG_SOFTRESET);
- while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 0))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 1))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 2))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
-
- while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & (1 << 0))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- if (omap->usbtll_fck != NULL) {
- clk_disable(omap->usbtll_fck);
- clk_put(omap->usbtll_fck);
- omap->usbtll_fck = NULL;
- }
-
- if (omap->usbhost_ick != NULL) {
- clk_disable(omap->usbhost_ick);
- clk_put(omap->usbhost_ick);
- omap->usbhost_ick = NULL;
- }
-
- if (omap->usbhost_fs_fck != NULL) {
- clk_disable(omap->usbhost_fs_fck);
- clk_put(omap->usbhost_fs_fck);
- omap->usbhost_fs_fck = NULL;
- }
-
- if (omap->usbhost_hs_fck != NULL) {
- clk_disable(omap->usbhost_hs_fck);
- clk_put(omap->usbhost_hs_fck);
- omap->usbhost_hs_fck = NULL;
- }
-
- if (omap->usbtll_ick != NULL) {
- clk_disable(omap->usbtll_ick);
- clk_put(omap->usbtll_ick);
- omap->usbtll_ick = NULL;
- }
-
- if (is_omap_ehci_rev2(omap)) {
- if (omap->xclk60mhsp1_ck != NULL) {
- clk_disable(omap->xclk60mhsp1_ck);
- clk_put(omap->xclk60mhsp1_ck);
- omap->xclk60mhsp1_ck = NULL;
- }
-
- if (omap->utmi_p1_fck != NULL) {
- clk_disable(omap->utmi_p1_fck);
- clk_put(omap->utmi_p1_fck);
- omap->utmi_p1_fck = NULL;
- }
-
- if (omap->xclk60mhsp2_ck != NULL) {
- clk_disable(omap->xclk60mhsp2_ck);
- clk_put(omap->xclk60mhsp2_ck);
- omap->xclk60mhsp2_ck = NULL;
- }
-
- if (omap->utmi_p2_fck != NULL) {
- clk_disable(omap->utmi_p2_fck);
- clk_put(omap->utmi_p2_fck);
- omap->utmi_p2_fck = NULL;
- }
- }
-
- if (omap->phy_reset) {
- if (gpio_is_valid(omap->reset_gpio_port[0]))
- gpio_free(omap->reset_gpio_port[0]);
-
- if (gpio_is_valid(omap->reset_gpio_port[1]))
- gpio_free(omap->reset_gpio_port[1]);
- }
-
- dev_dbg(omap->dev, "Clock to USB host has been disabled\n");
-}
-
-/*-------------------------------------------------------------------------*/
-
-static const struct hc_driver ehci_omap_hc_driver;
-
-/* configure so an HC device and id are always provided */
-/* always called with process context; sleeping is OK */
-
-/**
- * ehci_hcd_omap_probe - initialize TI-based HCDs
- *
- * Allocates basic resources for this USB host controller, and
- * then invokes the start() method for the HCD associated with it
- * through the hotplug entry's driver_data.
- */
-static int ehci_hcd_omap_probe(struct platform_device *pdev)
-{
- struct ehci_hcd_omap_platform_data *pdata = pdev->dev.platform_data;
- struct ehci_hcd_omap *omap;
- struct resource *res;
- struct usb_hcd *hcd;
-
- int irq = platform_get_irq(pdev, 0);
- int ret = -ENODEV;
- int i;
- char supply[7];
-
- if (!pdata) {
- dev_dbg(&pdev->dev, "missing platform_data\n");
- goto err_pdata;
- }
-
- if (usb_disabled())
- goto err_disabled;
-
- omap = kzalloc(sizeof(*omap), GFP_KERNEL);
- if (!omap) {
- ret = -ENOMEM;
- goto err_disabled;
- }
-
- hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
- dev_name(&pdev->dev));
- if (!hcd) {
- dev_err(&pdev->dev, "failed to create hcd with err %d\n", ret);
- ret = -ENOMEM;
- goto err_create_hcd;
- }
-
- platform_set_drvdata(pdev, omap);
- omap->dev = &pdev->dev;
- omap->phy_reset = pdata->phy_reset;
- omap->reset_gpio_port[0] = pdata->reset_gpio_port[0];
- omap->reset_gpio_port[1] = pdata->reset_gpio_port[1];
- omap->reset_gpio_port[2] = pdata->reset_gpio_port[2];
- omap->port_mode[0] = pdata->port_mode[0];
- omap->port_mode[1] = pdata->port_mode[1];
- omap->port_mode[2] = pdata->port_mode[2];
- omap->ehci = hcd_to_ehci(hcd);
- omap->ehci->sbrn = 0x20;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&pdev->dev, "EHCI ioremap failed\n");
- ret = -ENOMEM;
- goto err_ioremap;
- }
+ omap_ehci = hcd_to_ehci(hcd);
+ omap_ehci->sbrn = 0x20;
/* we know this is the memory we want, no need to ioremap again */
- omap->ehci->caps = hcd->regs;
- omap->ehci_base = hcd->regs;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- omap->uhh_base = ioremap(res->start, resource_size(res));
- if (!omap->uhh_base) {
- dev_err(&pdev->dev, "UHH ioremap failed\n");
- ret = -ENOMEM;
- goto err_uhh_ioremap;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- omap->tll_base = ioremap(res->start, resource_size(res));
- if (!omap->tll_base) {
- dev_err(&pdev->dev, "TLL ioremap failed\n");
- ret = -ENOMEM;
- goto err_tll_ioremap;
- }
-
- /* get ehci regulator and enable */
- for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
- if (omap->port_mode[i] != EHCI_HCD_OMAP_MODE_PHY) {
- omap->regulator[i] = NULL;
- continue;
- }
- snprintf(supply, sizeof(supply), "hsusb%d", i);
- omap->regulator[i] = regulator_get(omap->dev, supply);
- if (IS_ERR(omap->regulator[i])) {
- omap->regulator[i] = NULL;
- dev_dbg(&pdev->dev,
- "failed to get ehci port%d regulator\n", i);
- } else {
- regulator_enable(omap->regulator[i]);
- }
- }
-
- ret = omap_start_ehc(omap, hcd);
- if (ret) {
- dev_err(&pdev->dev, "failed to start ehci with err %d\n", ret);
- goto err_start;
- }
+ omap_ehci->caps = hcd->regs;
+ omap_ehci->regs = hcd->regs
+ + HC_LENGTH(readl(&omap_ehci->caps->hc_capbase));
- omap->ehci->regs = hcd->regs
- + HC_LENGTH(readl(&omap->ehci->caps->hc_capbase));
-
- dbg_hcs_params(omap->ehci, "reset");
- dbg_hcc_params(omap->ehci, "reset");
+ dbg_hcs_params(omap_ehci, "reset");
+ dbg_hcc_params(omap_ehci, "reset");
/* cache this readonly data; minimize chip reads */
- omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params);
+ omap_ehci->hcs_params = readl(&omap_ehci->caps->hcs_params);
ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
if (ret) {
- dev_err(&pdev->dev, "failed to add hcd with err %d\n", ret);
+ dev_err(dev, "failed to add hcd with err %d\n", ret);
goto err_add_hcd;
}
/* root ports should always stay powered */
- ehci_port_power(omap->ehci, 1);
+ ehci_port_power(omap_ehci, 1);
return 0;
err_add_hcd:
- omap_stop_ehc(omap, hcd);
+ omap_usbhs_disable(dev);
-err_start:
- for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
- if (omap->regulator[i]) {
- regulator_disable(omap->regulator[i]);
- regulator_put(omap->regulator[i]);
- }
- }
- iounmap(omap->tll_base);
-
-err_tll_ioremap:
- iounmap(omap->uhh_base);
-
-err_uhh_ioremap:
- iounmap(hcd->regs);
-
-err_ioremap:
+err_enable:
usb_put_hcd(hcd);
-err_create_hcd:
- kfree(omap);
-err_disabled:
-err_pdata:
+err_io:
return ret;
}
-/* may be called without controller electrically present */
-/* may be called with controller, bus, and devices active */
/**
* ehci_hcd_omap_remove - shutdown processing for EHCI HCDs
@@ -929,31 +228,18 @@ err_pdata:
*/
static int ehci_hcd_omap_remove(struct platform_device *pdev)
{
- struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
- int i;
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
usb_remove_hcd(hcd);
- omap_stop_ehc(omap, hcd);
- iounmap(hcd->regs);
- for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
- if (omap->regulator[i]) {
- regulator_disable(omap->regulator[i]);
- regulator_put(omap->regulator[i]);
- }
- }
- iounmap(omap->tll_base);
- iounmap(omap->uhh_base);
+ omap_usbhs_disable(dev);
usb_put_hcd(hcd);
- kfree(omap);
-
return 0;
}
static void ehci_hcd_omap_shutdown(struct platform_device *pdev)
{
- struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
+ struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 0f87dc72820..281e094e1c1 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -105,7 +105,8 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int retval;
- ehci_reset(ehci);
+ hcd->has_tt = 1;
+
retval = ehci_halt(ehci);
if (retval)
return retval;
@@ -117,7 +118,7 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
if (retval)
return retval;
- hcd->has_tt = 1;
+ ehci_reset(ehci);
ehci_port_power(ehci, 0);
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 07bb982e59f..d5eaea7caf8 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -44,42 +44,6 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
return 0;
}
-static int ehci_quirk_amd_hudson(struct ehci_hcd *ehci)
-{
- struct pci_dev *amd_smbus_dev;
- u8 rev = 0;
-
- amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
- if (amd_smbus_dev) {
- pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
- if (rev < 0x40) {
- pci_dev_put(amd_smbus_dev);
- amd_smbus_dev = NULL;
- return 0;
- }
- } else {
- amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL);
- if (!amd_smbus_dev)
- return 0;
- pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
- if (rev < 0x11 || rev > 0x18) {
- pci_dev_put(amd_smbus_dev);
- amd_smbus_dev = NULL;
- return 0;
- }
- }
-
- if (!amd_nb_dev)
- amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
-
- ehci_info(ehci, "QUIRK: Enable exception for AMD Hudson ASPM\n");
-
- pci_dev_put(amd_smbus_dev);
- amd_smbus_dev = NULL;
-
- return 1;
-}
-
/* called during probe() after chip reset completes */
static int ehci_pci_setup(struct usb_hcd *hcd)
{
@@ -138,9 +102,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
- if (ehci_quirk_amd_hudson(ehci))
- ehci->amd_l1_fix = 1;
-
retval = ehci_halt(ehci);
if (retval)
return retval;
@@ -191,6 +152,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
}
break;
case PCI_VENDOR_ID_AMD:
+ /* AMD PLL quirk */
+ if (usb_amd_find_chipset_info())
+ ehci->amd_pll_fix = 1;
/* AMD8111 EHCI doesn't work, according to AMD errata */
if (pdev->device == 0x7463) {
ehci_info(ehci, "ignoring AMD8111 (errata)\n");
@@ -236,6 +200,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
}
break;
case PCI_VENDOR_ID_ATI:
+ /* AMD PLL quirk */
+ if (usb_amd_find_chipset_info())
+ ehci->amd_pll_fix = 1;
/* SB600 and old version of SB700 have a bug in EHCI controller,
* which causes usb devices lose response in some cases.
*/
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
new file mode 100644
index 00000000000..a2168642175
--- /dev/null
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -0,0 +1,383 @@
+/*
+ * PMC MSP EHCI (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 2006-2010 PMC-Sierra Inc
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+/* includes */
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/usb.h>
+#include <msp_usb.h>
+
+/* stream disable */
+#define USB_CTRL_MODE_STREAM_DISABLE 0x10
+
+/* threshold */
+#define USB_CTRL_FIFO_THRESH 0x00300000
+
+/* register offset for usb_mode */
+#define USB_EHCI_REG_USB_MODE 0x68
+
+/* register offset for usb fifo */
+#define USB_EHCI_REG_USB_FIFO 0x24
+
+/* register offset for usb status */
+#define USB_EHCI_REG_USB_STATUS 0x44
+
+/* serial/parallel transceiver */
+#define USB_EHCI_REG_BIT_STAT_STS (1<<29)
+
+/* TWI USB0 host device pin */
+#define MSP_PIN_USB0_HOST_DEV 49
+
+/* TWI USB1 host device pin */
+#define MSP_PIN_USB1_HOST_DEV 50
+
+
+static void usb_hcd_tdi_set_mode(struct ehci_hcd *ehci)
+{
+ u8 *base;
+ u8 *statreg;
+ u8 *fiforeg;
+ u32 val;
+ struct ehci_regs *reg_base = ehci->regs;
+
+ /* get register base */
+ base = (u8 *)reg_base + USB_EHCI_REG_USB_MODE;
+ statreg = (u8 *)reg_base + USB_EHCI_REG_USB_STATUS;
+ fiforeg = (u8 *)reg_base + USB_EHCI_REG_USB_FIFO;
+
+ /* Disable controller mode stream */
+ val = ehci_readl(ehci, (u32 *)base);
+ ehci_writel(ehci, (val | USB_CTRL_MODE_STREAM_DISABLE),
+ (u32 *)base);
+
+ /* clear STS to select parallel transceiver interface */
+ val = ehci_readl(ehci, (u32 *)statreg);
+ val = val & ~USB_EHCI_REG_BIT_STAT_STS;
+ ehci_writel(ehci, val, (u32 *)statreg);
+
+ /* write to set the proper fifo threshold */
+ ehci_writel(ehci, USB_CTRL_FIFO_THRESH, (u32 *)fiforeg);
+
+ /* set TWI GPIO USB_HOST_DEV pin high */
+ gpio_direction_output(MSP_PIN_USB0_HOST_DEV, 1);
+#ifdef CONFIG_MSP_HAS_DUAL_USB
+ gpio_direction_output(MSP_PIN_USB1_HOST_DEV, 1);
+#endif
+}
+
+/* called during probe() after chip reset completes */
+static int ehci_msp_setup(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+ ehci->big_endian_mmio = 1;
+ ehci->big_endian_desc = 1;
+
+ ehci->caps = hcd->regs;
+ ehci->regs = hcd->regs +
+ HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ dbg_hcs_params(ehci, "reset");
+ dbg_hcc_params(ehci, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+ hcd->has_tt = 1;
+
+ retval = ehci_halt(ehci);
+ if (retval)
+ return retval;
+
+ ehci_reset(ehci);
+
+ /* data structure init */
+ retval = ehci_init(hcd);
+ if (retval)
+ return retval;
+
+ usb_hcd_tdi_set_mode(ehci);
+ ehci_port_power(ehci, 0);
+
+ return retval;
+}
+
+
+/* configure so an HC device and id are always provided
+ * always called with process context; sleeping is OK
+ */
+
+static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
+{
+ struct resource *res;
+ struct platform_device *pdev = &dev->dev;
+ u32 res_len;
+ int retval;
+
+ /* MAB register space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res == NULL)
+ return -ENOMEM;
+ res_len = res->end - res->start + 1;
+ if (!request_mem_region(res->start, res_len, "mab regs"))
+ return -EBUSY;
+
+ dev->mab_regs = ioremap_nocache(res->start, res_len);
+ if (dev->mab_regs == NULL) {
+ retval = -ENOMEM;
+ goto err1;
+ }
+
+ /* MSP USB register space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (res == NULL) {
+ retval = -ENOMEM;
+ goto err2;
+ }
+ res_len = res->end - res->start + 1;
+ if (!request_mem_region(res->start, res_len, "usbid regs")) {
+ retval = -EBUSY;
+ goto err2;
+ }
+ dev->usbid_regs = ioremap_nocache(res->start, res_len);
+ if (dev->usbid_regs == NULL) {
+ retval = -ENOMEM;
+ goto err3;
+ }
+
+ return 0;
+err3:
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ res_len = res->end - res->start + 1;
+ release_mem_region(res->start, res_len);
+err2:
+ iounmap(dev->mab_regs);
+err1:
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ res_len = res->end - res->start + 1;
+ release_mem_region(res->start, res_len);
+ dev_err(&pdev->dev, "Failed to map non-EHCI regs.\n");
+ return retval;
+}
+
+/**
+ * usb_hcd_msp_probe - initialize PMC MSP-based HCDs
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ *
+ */
+int usb_hcd_msp_probe(const struct hc_driver *driver,
+ struct platform_device *dev)
+{
+ int retval;
+ struct usb_hcd *hcd;
+ struct resource *res;
+ struct ehci_hcd *ehci;
+
+ hcd = usb_create_hcd(driver, &dev->dev, "pmcmsp");
+ if (!hcd)
+ return -ENOMEM;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ pr_debug("No IOMEM resource info for %s.\n", dev->name);
+ retval = -ENOMEM;
+ goto err1;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = res->end - res->start + 1;
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, dev->name)) {
+ retval = -EBUSY;
+ goto err1;
+ }
+ hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ pr_debug("ioremap failed");
+ retval = -ENOMEM;
+ goto err2;
+ }
+
+ res = platform_get_resource(dev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&dev->dev, "No IRQ resource info for %s.\n", dev->name);
+ retval = -ENOMEM;
+ goto err3;
+ }
+
+ /* Map non-EHCI register spaces */
+ retval = usb_hcd_msp_map_regs(to_mspusb_device(dev));
+ if (retval != 0)
+ goto err3;
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->big_endian_mmio = 1;
+ ehci->big_endian_desc = 1;
+
+
+ retval = usb_add_hcd(hcd, res->start, IRQF_SHARED);
+ if (retval == 0)
+ return 0;
+
+ usb_remove_hcd(hcd);
+err3:
+ iounmap(hcd->regs);
+err2:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err1:
+ usb_put_hcd(hcd);
+
+ return retval;
+}
+
+
+
+/**
+ * usb_hcd_msp_remove - shutdown processing for PMC MSP-based HCDs
+ * @dev: USB Host Controller being removed
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of usb_hcd_msp_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ *
+ * may be called without controller electrically present
+ * may be called with controller, bus, and devices active
+ */
+void usb_hcd_msp_remove(struct usb_hcd *hcd, struct platform_device *dev)
+{
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+}
+
+#ifdef CONFIG_MSP_HAS_DUAL_USB
+/*
+ * Wrapper around the main ehci_irq. Since both USB host controllers
+ * share the same IRQ, first determine whether we are the intended
+ * recipient of this interrupt.
+ */
+static irqreturn_t ehci_msp_irq(struct usb_hcd *hcd)
+{
+ u32 int_src;
+ struct device *dev = hcd->self.controller;
+ struct platform_device *pdev;
+ struct mspusb_device *mdev;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ /* need to reverse-map a couple of containers to get our device */
+ pdev = to_platform_device(dev);
+ mdev = to_mspusb_device(pdev);
+
+ /* Check to see if this interrupt is for this host controller */
+ int_src = ehci_readl(ehci, &mdev->mab_regs->int_stat);
+ if (int_src & (1 << pdev->id))
+ return ehci_irq(hcd);
+
+ /* Not for this device */
+ return IRQ_NONE;
+}
+#endif /* DUAL_USB */
+
+static const struct hc_driver ehci_msp_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "PMC MSP EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+#ifdef CONFIG_MSP_HAS_DUAL_USB
+ .irq = ehci_msp_irq,
+#else
+ .irq = ehci_irq,
+#endif
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_msp_setup,
+ .start = ehci_run,
+ .shutdown = ehci_shutdown,
+ .stop = ehci_stop,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_msp_drv_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pr_debug("In ehci_hcd_msp_drv_probe");
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ gpio_request(MSP_PIN_USB0_HOST_DEV, "USB0_HOST_DEV_GPIO");
+#ifdef CONFIG_MSP_HAS_DUAL_USB
+ gpio_request(MSP_PIN_USB1_HOST_DEV, "USB1_HOST_DEV_GPIO");
+#endif
+
+ ret = usb_hcd_msp_probe(&ehci_msp_hc_driver, pdev);
+
+ return ret;
+}
+
+static int ehci_hcd_msp_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_hcd_msp_remove(hcd, pdev);
+
+ /* free TWI GPIO USB_HOST_DEV pin */
+ gpio_free(MSP_PIN_USB0_HOST_DEV);
+#ifdef CONFIG_MSP_HAS_DUAL_USB
+ gpio_free(MSP_PIN_USB1_HOST_DEV);
+#endif
+
+ return 0;
+}
+
+MODULE_ALIAS("pmcmsp-ehci");
+
+static struct platform_driver ehci_hcd_msp_driver = {
+ .probe = ehci_hcd_msp_drv_probe,
+ .remove = ehci_hcd_msp_drv_remove,
+ .driver = {
+ .name = "pmcmsp-ehci",
+ .owner = THIS_MODULE,
+ },
+};
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index baf7362b0e4..42abd0f603b 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1095,22 +1095,24 @@ submit_async (
struct list_head *qtd_list,
gfp_t mem_flags
) {
- struct ehci_qtd *qtd;
int epnum;
unsigned long flags;
struct ehci_qh *qh = NULL;
int rc;
- qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
epnum = urb->ep->desc.bEndpointAddress;
#ifdef EHCI_URB_TRACE
- ehci_dbg (ehci,
- "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
- __func__, urb->dev->devpath, urb,
- epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
- urb->transfer_buffer_length,
- qtd, urb->ep->hcpriv);
+ {
+ struct ehci_qtd *qtd;
+ qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
+ ehci_dbg(ehci,
+ "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ __func__, urb->dev->devpath, urb,
+ epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd, urb->ep->hcpriv);
+ }
#endif
spin_lock_irqsave (&ehci->lock, flags);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index aa46f57f9ec..1543c838b3d 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1048,8 +1048,6 @@ iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
* not like a QH -- no persistent state (toggle, halt)
*/
if (stream->refcount == 1) {
- int is_in;
-
// BUG_ON (!list_empty(&stream->td_list));
while (!list_empty (&stream->free_list)) {
@@ -1076,7 +1074,6 @@ iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
}
}
- is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
stream->bEndpointAddress &= 0x0f;
if (stream->ep)
stream->ep->hcpriv = NULL;
@@ -1590,63 +1587,6 @@ itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}
-#define AB_REG_BAR_LOW 0xe0
-#define AB_REG_BAR_HIGH 0xe1
-#define AB_INDX(addr) ((addr) + 0x00)
-#define AB_DATA(addr) ((addr) + 0x04)
-#define NB_PCIE_INDX_ADDR 0xe0
-#define NB_PCIE_INDX_DATA 0xe4
-#define NB_PIF0_PWRDOWN_0 0x01100012
-#define NB_PIF0_PWRDOWN_1 0x01100013
-
-static void ehci_quirk_amd_L1(struct ehci_hcd *ehci, int disable)
-{
- u32 addr, addr_low, addr_high, val;
-
- outb_p(AB_REG_BAR_LOW, 0xcd6);
- addr_low = inb_p(0xcd7);
- outb_p(AB_REG_BAR_HIGH, 0xcd6);
- addr_high = inb_p(0xcd7);
- addr = addr_high << 8 | addr_low;
- outl_p(0x30, AB_INDX(addr));
- outl_p(0x40, AB_DATA(addr));
- outl_p(0x34, AB_INDX(addr));
- val = inl_p(AB_DATA(addr));
-
- if (disable) {
- val &= ~0x8;
- val |= (1 << 4) | (1 << 9);
- } else {
- val |= 0x8;
- val &= ~((1 << 4) | (1 << 9));
- }
- outl_p(val, AB_DATA(addr));
-
- if (amd_nb_dev) {
- addr = NB_PIF0_PWRDOWN_0;
- pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
- pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
- if (disable)
- val &= ~(0x3f << 7);
- else
- val |= 0x3f << 7;
-
- pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
-
- addr = NB_PIF0_PWRDOWN_1;
- pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
- pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
- if (disable)
- val &= ~(0x3f << 7);
- else
- val |= 0x3f << 7;
-
- pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
- }
-
- return;
-}
-
/* fit urb's itds into the selected schedule slot; activate as needed */
static int
itd_link_urb (
@@ -1675,8 +1615,8 @@ itd_link_urb (
}
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
- if (ehci->amd_l1_fix == 1)
- ehci_quirk_amd_L1(ehci, 1);
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_quirk_pll_disable();
}
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
@@ -1804,8 +1744,8 @@ itd_complete (
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
- if (ehci->amd_l1_fix == 1)
- ehci_quirk_amd_L1(ehci, 0);
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_quirk_pll_enable();
}
if (unlikely(list_is_singular(&stream->td_list))) {
@@ -2095,8 +2035,8 @@ sitd_link_urb (
}
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
- if (ehci->amd_l1_fix == 1)
- ehci_quirk_amd_L1(ehci, 1);
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_quirk_pll_disable();
}
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
@@ -2200,8 +2140,8 @@ sitd_complete (
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
- if (ehci->amd_l1_fix == 1)
- ehci_quirk_amd_L1(ehci, 0);
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_quirk_pll_enable();
}
if (list_is_singular(&stream->td_list)) {
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
new file mode 100644
index 00000000000..a516af28c29
--- /dev/null
+++ b/drivers/usb/host/ehci-tegra.c
@@ -0,0 +1,715 @@
+/*
+ * EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2009 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/tegra_usb.h>
+#include <linux/irq.h>
+#include <linux/usb/otg.h>
+#include <mach/usb_phy.h>
+
+#define TEGRA_USB_DMA_ALIGN 32
+
+struct tegra_ehci_hcd {
+ struct ehci_hcd *ehci;
+ struct tegra_usb_phy *phy;
+ struct clk *clk;
+ struct clk *emc_clk;
+ struct otg_transceiver *transceiver;
+ int host_resumed;
+ int bus_suspended;
+ int port_resuming;
+ int power_down_on_bus_suspend;
+ enum tegra_usb_phy_port_speed port_speed;
+};
+
+static void tegra_ehci_power_up(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+
+ clk_enable(tegra->emc_clk);
+ clk_enable(tegra->clk);
+ tegra_usb_phy_power_on(tegra->phy);
+ tegra->host_resumed = 1;
+}
+
+static void tegra_ehci_power_down(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+
+ tegra->host_resumed = 0;
+ tegra_usb_phy_power_off(tegra->phy);
+ clk_disable(tegra->clk);
+ clk_disable(tegra->emc_clk);
+}
+
+static int tegra_ehci_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ u32 __iomem *status_reg;
+ u32 temp;
+ unsigned long flags;
+ int retval = 0;
+
+ status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
+
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ /*
+ * The generic ehci_hub_control() would clear the other write-one-to-clear
+ * bits when handling USB_PORT_FEAT_ENABLE, since it writes back the value
+ * it just read; handle that request here and mask those bits out instead
+ */
+ if (typeReq == ClearPortFeature && wValue == USB_PORT_FEAT_ENABLE) {
+ temp = ehci_readl(ehci, status_reg) & ~PORT_RWC_BITS;
+ ehci_writel(ehci, temp & ~PORT_PE, status_reg);
+ goto done;
+ }
+
+ else if (typeReq == GetPortStatus) {
+ temp = ehci_readl(ehci, status_reg);
+ if (tegra->port_resuming && !(temp & PORT_SUSPEND)) {
+ /* Resume completed, re-enable disconnect detection */
+ tegra->port_resuming = 0;
+ tegra_usb_phy_postresume(tegra->phy);
+ }
+ }
+
+ else if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
+ temp = ehci_readl(ehci, status_reg);
+ if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
+ retval = -EPIPE;
+ goto done;
+ }
+
+ temp &= ~PORT_WKCONN_E;
+ temp |= PORT_WKDISC_E | PORT_WKOC_E;
+ ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
+ /*
+ * If a transaction is in progress, there may be a delay in
+ * suspending the port. Poll until the port is suspended.
+ */
+ if (handshake(ehci, status_reg, PORT_SUSPEND,
+ PORT_SUSPEND, 5000))
+ pr_err("%s: timeout waiting for SUSPEND\n", __func__);
+
+ set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
+ goto done;
+ }
+
+ /*
+ * Tegra host controller will time the resume operation to clear the bit
+ * when the port control state switches to HS or FS Idle. This behavior
+ * is different from EHCI where the host controller driver is required
+ * to set this bit to a zero after the resume duration is timed in the
+ * driver.
+ */
+ else if (typeReq == ClearPortFeature &&
+ wValue == USB_PORT_FEAT_SUSPEND) {
+ temp = ehci_readl(ehci, status_reg);
+ if ((temp & PORT_RESET) || !(temp & PORT_PE)) {
+ retval = -EPIPE;
+ goto done;
+ }
+
+ if (!(temp & PORT_SUSPEND))
+ goto done;
+
+ /* Disable disconnect detection during port resume */
+ tegra_usb_phy_preresume(tegra->phy);
+
+ ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25);
+
+ temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
+ /* start resume signalling */
+ ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ /* Poll until the controller clears RESUME and SUSPEND */
+ if (handshake(ehci, status_reg, PORT_RESUME, 0, 2000))
+ pr_err("%s: timeout waiting for RESUME\n", __func__);
+ if (handshake(ehci, status_reg, PORT_SUSPEND, 0, 2000))
+ pr_err("%s: timeout waiting for SUSPEND\n", __func__);
+
+ ehci->reset_done[wIndex-1] = 0;
+
+ tegra->port_resuming = 1;
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+
+ /* Handle the hub control events here */
+ return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+done:
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ return retval;
+}
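
The ClearPortFeature(USB_PORT_FEAT_ENABLE) branch above masks PORT_RWC_BITS out of the PORTSC value before writing it back, because the change bits in that register are write-one-to-clear: writing back the raw value just read would acknowledge, and silently discard, any pending connect, enable or over-current change event. A minimal sketch of that read-modify-write rule, using the standard EHCI PORTSC change-bit positions and not tied to this driver's accessors:

#include <stdint.h>

/* EHCI PORTSC change bits; writing 1 to any of them clears it */
#define PORT_CSC (1u << 1) /* connect status change */
#define PORT_PEC (1u << 3) /* port enable change */
#define PORT_OCC (1u << 5) /* over-current change */
#define RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
#define PORT_PE  (1u << 2) /* port enabled */

/* Compute the value to write when disabling the port without
 * acknowledging any pending change bits. */
static inline uint32_t portsc_disable(uint32_t portsc)
{
	return (portsc & ~RWC_BITS) & ~PORT_PE;
}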
+
+static void tegra_ehci_restart(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ ehci_reset(ehci);
+
+ /* setup the frame list and Async q heads */
+ ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
+ ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
+ /* setup the command register and set the controller in RUN mode */
+ ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
+ ehci->command |= CMD_RUN;
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+
+ down_write(&ehci_cf_port_reset_rwsem);
+ ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+ /* flush posted writes */
+ ehci_readl(ehci, &ehci->regs->command);
+ up_write(&ehci_cf_port_reset_rwsem);
+}
+
+static int tegra_usb_suspend(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ struct ehci_regs __iomem *hw = tegra->ehci->regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tegra->ehci->lock, flags);
+
+ tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
+ ehci_halt(tegra->ehci);
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ spin_unlock_irqrestore(&tegra->ehci->lock, flags);
+
+ tegra_ehci_power_down(hcd);
+ return 0;
+}
+
+static int tegra_usb_resume(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct ehci_regs __iomem *hw = ehci->regs;
+ unsigned long val;
+
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ tegra_ehci_power_up(hcd);
+
+ if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) {
+ /* Wait for the phy to detect new devices
+ * before we restart the controller */
+ msleep(10);
+ goto restart;
+ }
+
+ /* Force the phy to keep data lines in suspend state */
+ tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed);
+
+ /* Enable host mode */
+ tdi_reset(ehci);
+
+ /* Enable Port Power */
+ val = readl(&hw->port_status[0]);
+ val |= PORT_POWER;
+ writel(val, &hw->port_status[0]);
+ udelay(10);
+
+ /* Check whether the phy resumed from LP0. When the phy resumes
+ * from LP0, the USB registers are reset. */
+ if (!readl(&hw->async_next)) {
+ /* Program the field PTC based on the saved speed mode */
+ val = readl(&hw->port_status[0]);
+ val &= ~PORT_TEST(~0);
+ if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_HIGH)
+ val |= PORT_TEST_FORCE;
+ else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL)
+ val |= PORT_TEST(6);
+ else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
+ val |= PORT_TEST(7);
+ writel(val, &hw->port_status[0]);
+ udelay(10);
+
+ /* Disable test mode by setting PTC field to NORMAL_OP */
+ val = readl(&hw->port_status[0]);
+ val &= ~PORT_TEST(~0);
+ writel(val, &hw->port_status[0]);
+ udelay(10);
+ }
+
+ /* Poll until CCS is enabled */
+ if (handshake(ehci, &hw->port_status[0], PORT_CONNECT,
+ PORT_CONNECT, 2000)) {
+ pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__);
+ goto restart;
+ }
+
+ /* Poll until PE is enabled */
+ if (handshake(ehci, &hw->port_status[0], PORT_PE,
+ PORT_PE, 2000)) {
+ pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__);
+ goto restart;
+ }
+
+ /* Clear the PCI status, to avoid an interrupt taken upon resume */
+ val = readl(&hw->status);
+ val |= STS_PCD;
+ writel(val, &hw->status);
+
+ /* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */
+ val = readl(&hw->port_status[0]);
+ if ((val & PORT_POWER) && (val & PORT_PE)) {
+ val |= PORT_SUSPEND;
+ writel(val, &hw->port_status[0]);
+
+ /* Wait until port suspend completes */
+ if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND,
+ PORT_SUSPEND, 1000)) {
+ pr_err("%s: timeout waiting for PORT_SUSPEND\n",
+ __func__);
+ goto restart;
+ }
+ }
+
+ tegra_ehci_phy_restore_end(tegra->phy);
+ return 0;
+
+restart:
+ if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH)
+ tegra_ehci_phy_restore_end(tegra->phy);
+
+ tegra_ehci_restart(hcd);
+ return 0;
+}
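
The resume path above leans on handshake(), the ehci-hcd helper that polls a register until selected bits reach a wanted value or a timeout expires. The real helper lives in the EHCI core; a minimal poll-with-timeout routine of the same shape (hypothetical name, readl-based, treating an all-ones read as "controller gone", as the isp1760 variant later in this series does) would look roughly like:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Poll *reg until (value & mask) == done, for at most 'usec' microseconds.
 * Returns 0 on success, -ENODEV if the device appears to be gone,
 * -ETIMEDOUT otherwise. */
static int poll_reg(void __iomem *reg, u32 mask, u32 done, int usec)
{
	u32 val;

	do {
		val = readl(reg);
		if (val == ~0U)
			return -ENODEV;
		if ((val & mask) == done)
			return 0;
		udelay(1);
	} while (--usec > 0);

	return -ETIMEDOUT;
}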
+
+static void tegra_ehci_shutdown(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+
+ /* ehci_shutdown touches the USB controller registers, so make sure
+ * the controller is clocked */
+ if (!tegra->host_resumed)
+ tegra_ehci_power_up(hcd);
+
+ ehci_shutdown(hcd);
+}
+
+static int tegra_ehci_setup(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ /* EHCI registers start at offset 0x100 */
+ ehci->caps = hcd->regs + 0x100;
+ ehci->regs = hcd->regs + 0x100 +
+ HC_LENGTH(readl(&ehci->caps->hc_capbase));
+
+ dbg_hcs_params(ehci, "reset");
+ dbg_hcc_params(ehci, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = readl(&ehci->caps->hcs_params);
+
+ /* switch to host mode */
+ hcd->has_tt = 1;
+ ehci_reset(ehci);
+
+ retval = ehci_halt(ehci);
+ if (retval)
+ return retval;
+
+ /* data structure init */
+ retval = ehci_init(hcd);
+ if (retval)
+ return retval;
+
+ ehci->sbrn = 0x20;
+
+ ehci_port_power(ehci, 1);
+ return retval;
+}
+
+#ifdef CONFIG_PM
+static int tegra_ehci_bus_suspend(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+ int error_status = 0;
+
+ error_status = ehci_bus_suspend(hcd);
+ if (!error_status && tegra->power_down_on_bus_suspend) {
+ tegra_usb_suspend(hcd);
+ tegra->bus_suspended = 1;
+ }
+
+ return error_status;
+}
+
+static int tegra_ehci_bus_resume(struct usb_hcd *hcd)
+{
+ struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+
+ if (tegra->bus_suspended && tegra->power_down_on_bus_suspend) {
+ tegra_usb_resume(hcd);
+ tegra->bus_suspended = 0;
+ }
+
+ tegra_usb_phy_preresume(tegra->phy);
+ tegra->port_resuming = 1;
+ return ehci_bus_resume(hcd);
+}
+#endif
+
+struct temp_buffer {
+ void *kmalloc_ptr;
+ void *old_xfer_buffer;
+ u8 data[0];
+};
+
+static void free_temp_buffer(struct urb *urb)
+{
+ enum dma_data_direction dir;
+ struct temp_buffer *temp;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ temp = container_of(urb->transfer_buffer, struct temp_buffer,
+ data);
+
+ if (dir == DMA_FROM_DEVICE)
+ memcpy(temp->old_xfer_buffer, temp->data,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->old_xfer_buffer;
+ kfree(temp->kmalloc_ptr);
+
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+}
+
+static int alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
+{
+ enum dma_data_direction dir;
+ struct temp_buffer *temp, *kmalloc_ptr;
+ size_t kmalloc_size;
+
+ if (urb->num_sgs || urb->sg ||
+ urb->transfer_buffer_length == 0 ||
+ !((uintptr_t)urb->transfer_buffer & (TEGRA_USB_DMA_ALIGN - 1)))
+ return 0;
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ /* Allocate a buffer with enough padding for alignment */
+ kmalloc_size = urb->transfer_buffer_length +
+ sizeof(struct temp_buffer) + TEGRA_USB_DMA_ALIGN - 1;
+
+ kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+ if (!kmalloc_ptr)
+ return -ENOMEM;
+
+ /* Position our struct temp_buffer such that data is aligned */
+ temp = PTR_ALIGN(kmalloc_ptr + 1, TEGRA_USB_DMA_ALIGN) - 1;
+
+ temp->kmalloc_ptr = kmalloc_ptr;
+ temp->old_xfer_buffer = urb->transfer_buffer;
+ if (dir == DMA_TO_DEVICE)
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->data;
+
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+ return 0;
+}
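
The PTR_ALIGN arithmetic above over-allocates by sizeof(struct temp_buffer) + TEGRA_USB_DMA_ALIGN - 1 and then slides the header so that data[] lands on the next alignment boundary. A standalone user-space sketch of the same arithmetic (hypothetical names, plain malloc, 32 bytes standing in for TEGRA_USB_DMA_ALIGN):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DMA_ALIGN 32

struct bounce {
	void *alloc_ptr;       /* original allocation, kept for free() */
	void *old_xfer_buffer; /* the caller's unaligned buffer */
	uint8_t data[];        /* must start DMA_ALIGN-aligned */
};

int main(void)
{
	size_t len = 100;
	void *raw = malloc(len + sizeof(struct bounce) + DMA_ALIGN - 1);
	if (!raw)
		return 1;

	/* round the would-be address of data[] up to the next boundary,
	 * then step back by one header so data[] sits exactly there */
	uintptr_t payload = (uintptr_t)raw + sizeof(struct bounce);
	payload = (payload + DMA_ALIGN - 1) & ~((uintptr_t)DMA_ALIGN - 1);
	struct bounce *b = (struct bounce *)(payload - sizeof(struct bounce));

	b->alloc_ptr = raw;
	printf("raw=%p data=%p aligned=%s\n", raw, (void *)b->data,
	       ((uintptr_t)b->data % DMA_ALIGN) ? "no" : "yes");
	free(b->alloc_ptr);
	return 0;
}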
+
+static int tegra_ehci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int ret;
+
+ ret = alloc_temp_buffer(urb, mem_flags);
+ if (ret)
+ return ret;
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+ free_temp_buffer(urb);
+
+ return ret;
+}
+
+static void tegra_ehci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+ free_temp_buffer(urb);
+}
+
+static const struct hc_driver tegra_ehci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Tegra EHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ .flags = HCD_USB2 | HCD_MEMORY,
+
+ .reset = tegra_ehci_setup,
+ .irq = ehci_irq,
+
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = tegra_ehci_shutdown,
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .map_urb_for_dma = tegra_ehci_map_urb_for_dma,
+ .unmap_urb_for_dma = tegra_ehci_unmap_urb_for_dma,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+ .get_frame_number = ehci_get_frame,
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = tegra_ehci_hub_control,
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+#ifdef CONFIG_PM
+ .bus_suspend = tegra_ehci_bus_suspend,
+ .bus_resume = tegra_ehci_bus_resume,
+#endif
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+};
+
+static int tegra_ehci_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct usb_hcd *hcd;
+ struct tegra_ehci_hcd *tegra;
+ struct tegra_ehci_platform_data *pdata;
+ int err = 0;
+ int irq;
+ int instance = pdev->id;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "Platform data missing\n");
+ return -EINVAL;
+ }
+
+ tegra = kzalloc(sizeof(struct tegra_ehci_hcd), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
+ err = -ENOMEM;
+ goto fail_hcd;
+ }
+
+ platform_set_drvdata(pdev, tegra);
+
+ tegra->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tegra->clk)) {
+ dev_err(&pdev->dev, "Can't get ehci clock\n");
+ err = PTR_ERR(tegra->clk);
+ goto fail_clk;
+ }
+
+ err = clk_enable(tegra->clk);
+ if (err)
+ goto fail_clken;
+
+ tegra->emc_clk = clk_get(&pdev->dev, "emc");
+ if (IS_ERR(tegra->emc_clk)) {
+ dev_err(&pdev->dev, "Can't get emc clock\n");
+ err = PTR_ERR(tegra->emc_clk);
+ goto fail_emc_clk;
+ }
+
+ clk_enable(tegra->emc_clk);
+ clk_set_rate(tegra->emc_clk, 400000000);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto fail_io;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = ioremap(res->start, resource_size(res));
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "Failed to remap I/O memory\n");
+ err = -ENOMEM;
+ goto fail_io;
+ }
+
+ tegra->phy = tegra_usb_phy_open(instance, hcd->regs, pdata->phy_config,
+ TEGRA_USB_PHY_MODE_HOST);
+ if (IS_ERR(tegra->phy)) {
+ dev_err(&pdev->dev, "Failed to open USB phy\n");
+ err = -ENXIO;
+ goto fail_phy;
+ }
+
+ err = tegra_usb_phy_power_on(tegra->phy);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to power on the phy\n");
+ goto fail;
+ }
+
+ tegra->host_resumed = 1;
+ tegra->power_down_on_bus_suspend = pdata->power_down_on_bus_suspend;
+ tegra->ehci = hcd_to_ehci(hcd);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENODEV;
+ goto fail;
+ }
+ set_irq_flags(irq, IRQF_VALID);
+
+#ifdef CONFIG_USB_OTG_UTILS
+ if (pdata->operating_mode == TEGRA_USB_OTG) {
+ tegra->transceiver = otg_get_transceiver();
+ if (tegra->transceiver)
+ otg_set_host(tegra->transceiver, &hcd->self);
+ }
+#endif
+
+ err = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add USB HCD\n");
+ goto fail;
+ }
+
+ return err;
+
+fail:
+#ifdef CONFIG_USB_OTG_UTILS
+ if (tegra->transceiver) {
+ otg_set_host(tegra->transceiver, NULL);
+ otg_put_transceiver(tegra->transceiver);
+ }
+#endif
+ tegra_usb_phy_close(tegra->phy);
+fail_phy:
+ iounmap(hcd->regs);
+fail_io:
+ clk_disable(tegra->emc_clk);
+ clk_put(tegra->emc_clk);
+fail_emc_clk:
+ clk_disable(tegra->clk);
+fail_clken:
+ clk_put(tegra->clk);
+fail_clk:
+ usb_put_hcd(hcd);
+fail_hcd:
+ kfree(tegra);
+ return err;
+}
+
+#ifdef CONFIG_PM
+static int tegra_ehci_resume(struct platform_device *pdev)
+{
+ struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+
+ if (tegra->bus_suspended)
+ return 0;
+
+ return tegra_usb_resume(hcd);
+}
+
+static int tegra_ehci_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+
+ if (tegra->bus_suspended)
+ return 0;
+
+ if (time_before(jiffies, tegra->ehci->next_statechange))
+ msleep(10);
+
+ return tegra_usb_suspend(hcd);
+}
+#endif
+
+static int tegra_ehci_remove(struct platform_device *pdev)
+{
+ struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+
+ if (tegra == NULL || hcd == NULL)
+ return -EINVAL;
+
+#ifdef CONFIG_USB_OTG_UTILS
+ if (tegra->transceiver) {
+ otg_set_host(tegra->transceiver, NULL);
+ otg_put_transceiver(tegra->transceiver);
+ }
+#endif
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+
+ tegra_usb_phy_close(tegra->phy);
+ iounmap(hcd->regs);
+
+ clk_disable(tegra->clk);
+ clk_put(tegra->clk);
+
+ clk_disable(tegra->emc_clk);
+ clk_put(tegra->emc_clk);
+
+ kfree(tegra);
+ return 0;
+}
+
+static void tegra_ehci_hcd_shutdown(struct platform_device *pdev)
+{
+ struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver tegra_ehci_driver = {
+ .probe = tegra_ehci_probe,
+ .remove = tegra_ehci_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_ehci_suspend,
+ .resume = tegra_ehci_resume,
+#endif
+ .shutdown = tegra_ehci_hcd_shutdown,
+ .driver = {
+ .name = "tegra-ehci",
+ }
+};
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 799ac16a54b..f86d3fa2021 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -131,7 +131,7 @@ struct ehci_hcd { /* one per controller */
unsigned has_amcc_usb23:1;
unsigned need_io_watchdog:1;
unsigned broken_periodic:1;
- unsigned amd_l1_fix:1;
+ unsigned amd_pll_fix:1;
unsigned fs_i_thresh:1; /* Intel iso scheduling */
unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index f90d003f230..2562e92e317 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -927,7 +927,8 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
if (state == US_CTRL_SETUP) {
dir = TD_DIR_SETUP;
if (unsuitable_for_dma(urb->setup_dma))
- unmap_urb_setup_for_dma(imx21->hcd, urb);
+ usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
+ urb);
etd->dma_handle = urb->setup_dma;
etd->cpu_buffer = urb->setup_packet;
bufround = 0;
@@ -943,7 +944,7 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
bufround = (dir == TD_DIR_IN) ? 1 : 0;
if (unsuitable_for_dma(urb->transfer_dma))
- unmap_urb_for_dma(imx21->hcd, urb);
+ usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);
etd->dma_handle = urb->transfer_dma;
etd->cpu_buffer = urb->transfer_buffer;
@@ -1471,8 +1472,8 @@ static int get_hub_descriptor(struct usb_hcd *hcd,
0x0010 | /* No over current protection */
0);
- desc->bitmap[0] = 1 << 1;
- desc->bitmap[1] = ~0;
+ desc->u.hs.DeviceRemovable[0] = 1 << 1;
+ desc->u.hs.DeviceRemovable[1] = ~0;
return 0;
}
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 0da7fc05f45..c0e22f26da1 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -951,9 +951,9 @@ static void isp116x_hub_descriptor(struct isp116x *isp116x,
/* Power switching, device type, overcurrent. */
desc->wHubCharacteristics = cpu_to_le16((u16) ((reg >> 8) & 0x1f));
desc->bPwrOn2PwrGood = (u8) ((reg >> 24) & 0xff);
- /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
- desc->bitmap[0] = 0;
- desc->bitmap[1] = ~0;
+ /* ports removable, and legacy PortPwrCtrlMask */
+ desc->u.hs.DeviceRemovable[0] = 0;
+ desc->u.hs.DeviceRemovable[1] = ~0;
}
/* Perform reset of a given port.
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 43a39eb56cc..662cd002adf 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -226,7 +226,6 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
- int index = ep->ptd_index;
int last = ep->ptd_index + ep->num_ptds;
if (last > epq->buf_count)
@@ -236,10 +235,8 @@ static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1
epq->buf_map, epq->skip_map);
BUG_ON(last > epq->buf_count);
- for (; index < last; index++) {
- __clear_bit(index, &epq->buf_map);
- __set_bit(index, &epq->skip_map);
- }
+ bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
+ bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
epq->buf_avail += ep->num_ptds;
epq->ptd_count--;
@@ -1555,9 +1552,9 @@ static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
- /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
- desc->bitmap[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
- desc->bitmap[1] = ~0;
+ /* ports removable, and legacy PortPwrCtrlMask */
+ desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
+ desc->u.hs.DeviceRemovable[1] = ~0;
DBG(3, "%s: exit\n", __func__);
}
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index c470cc83dbb..f50e84ac570 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -48,10 +48,6 @@ static inline struct isp1760_hcd *hcd_to_priv(struct usb_hcd *hcd)
{
return (struct isp1760_hcd *) (hcd->hcd_priv);
}
-static inline struct usb_hcd *priv_to_hcd(struct isp1760_hcd *priv)
-{
- return container_of((void *) priv, struct usb_hcd, hcd_priv);
-}
/* Section 2.2 Host Controller Capability Registers */
#define HC_LENGTH(p) (((p)>>00)&0x00ff) /* bits 7:0 */
@@ -81,11 +77,10 @@ static inline struct usb_hcd *priv_to_hcd(struct isp1760_hcd *priv)
#define PORT_RWC_BITS (PORT_CSC)
struct isp1760_qtd {
- struct isp1760_qtd *hw_next;
u8 packet_type;
- u8 toggle;
-
void *data_buffer;
+ u32 payload_addr;
+
/* the rest is HCD-private */
struct list_head qtd_list;
struct urb *urb;
@@ -93,205 +88,267 @@ struct isp1760_qtd {
/* isp special*/
u32 status;
-#define URB_COMPLETE_NOTIFY (1 << 0)
#define URB_ENQUEUED (1 << 1)
-#define URB_TYPE_ATL (1 << 2)
-#define URB_TYPE_INT (1 << 3)
};
struct isp1760_qh {
/* first part defined by EHCI spec */
struct list_head qtd_list;
- struct isp1760_hcd *priv;
-
- /* periodic schedule info */
- unsigned short period; /* polling interval */
- struct usb_device *dev;
u32 toggle;
u32 ping;
};
-#define ehci_port_speed(priv, portsc) USB_PORT_STAT_HIGH_SPEED
-
-static unsigned int isp1760_readl(__u32 __iomem *regs)
+/*
+ * Access functions for isp176x registers (addresses 0..0x03FF).
+ */
+static u32 reg_read32(void __iomem *base, u32 reg)
{
- return readl(regs);
+ return readl(base + reg);
}
-static void isp1760_writel(const unsigned int val, __u32 __iomem *regs)
+static void reg_write32(void __iomem *base, u32 reg, u32 val)
{
- writel(val, regs);
+ writel(val, base + reg);
}
/*
- * The next two copy via MMIO data to/from the device. memcpy_{to|from}io()
+ * Access functions for isp176x memory (offset >= 0x0400).
+ *
+ * bank_reads8() reads memory locations prefetched by an earlier write to
+ * HC_MEMORY_REG (see isp176x datasheet). Unless you want to do fancy multi-
+ * bank optimizations, you should use the more generic mem_reads8() below.
+ *
+ * For access to ptd memory, use the specialized ptd_read() and ptd_write()
+ * below.
+ *
+ * These functions copy data to/from the device via MMIO. memcpy_{to|from}io()
* doesn't quite work because some people have to enforce 32-bit access
*/
-static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
- __u32 __iomem *dst, u32 len)
+static void bank_reads8(void __iomem *src_base, u32 src_offset, u32 bank_addr,
+ __u32 *dst, u32 bytes)
{
+ __u32 __iomem *src;
u32 val;
- u8 *buff8;
+ __u8 *src_byteptr;
+ __u8 *dst_byteptr;
- if (!src) {
- printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len);
- return;
- }
+ src = src_base + (bank_addr | src_offset);
- while (len >= 4) {
- *src = __raw_readl(dst);
- len -= 4;
- src++;
- dst++;
+ if (src_offset < PAYLOAD_OFFSET) {
+ while (bytes >= 4) {
+ *dst = le32_to_cpu(__raw_readl(src));
+ bytes -= 4;
+ src++;
+ dst++;
+ }
+ } else {
+ while (bytes >= 4) {
+ *dst = __raw_readl(src);
+ bytes -= 4;
+ src++;
+ dst++;
+ }
}
- if (!len)
+ if (!bytes)
return;
/* in case we have 3, 2 or 1 bytes left. The dst buffer may not be fully
* allocated.
*/
- val = isp1760_readl(dst);
-
- buff8 = (u8 *)src;
- while (len) {
-
- *buff8 = val;
- val >>= 8;
- len--;
- buff8++;
+ if (src_offset < PAYLOAD_OFFSET)
+ val = le32_to_cpu(__raw_readl(src));
+ else
+ val = __raw_readl(src);
+
+ dst_byteptr = (void *) dst;
+ src_byteptr = (void *) &val;
+ while (bytes > 0) {
+ *dst_byteptr = *src_byteptr;
+ dst_byteptr++;
+ src_byteptr++;
+ bytes--;
}
}
-static void priv_write_copy(const struct isp1760_hcd *priv, const u32 *src,
- __u32 __iomem *dst, u32 len)
+static void mem_reads8(void __iomem *src_base, u32 src_offset, void *dst,
+ u32 bytes)
{
- while (len >= 4) {
- __raw_writel(*src, dst);
- len -= 4;
- src++;
- dst++;
+ reg_write32(src_base, HC_MEMORY_REG, src_offset + ISP_BANK(0));
+ ndelay(90);
+ bank_reads8(src_base, src_offset, ISP_BANK(0), dst, bytes);
+}
+
+static void mem_writes8(void __iomem *dst_base, u32 dst_offset,
+ __u32 const *src, u32 bytes)
+{
+ __u32 __iomem *dst;
+
+ dst = dst_base + dst_offset;
+
+ if (dst_offset < PAYLOAD_OFFSET) {
+ while (bytes >= 4) {
+ __raw_writel(cpu_to_le32(*src), dst);
+ bytes -= 4;
+ src++;
+ dst++;
+ }
+ } else {
+ while (bytes >= 4) {
+ __raw_writel(*src, dst);
+ bytes -= 4;
+ src++;
+ dst++;
+ }
}
- if (!len)
+ if (!bytes)
return;
- /* in case we have 3, 2 or 1 by left. The buffer is allocated and the
- * extra bytes should not be read by the HW
+ /* in case we have 3, 2 or 1 bytes left. The buffer is allocated and the
+ * extra bytes should not be read by the HW.
*/
- __raw_writel(*src, dst);
+ if (dst_offset < PAYLOAD_OFFSET)
+ __raw_writel(cpu_to_le32(*src), dst);
+ else
+ __raw_writel(*src, dst);
+}
+
+/*
+ * Read and write ptds. 'ptd_offset' should be one of ISO_PTD_OFFSET,
+ * INT_PTD_OFFSET, and ATL_PTD_OFFSET. 'slot' should be less than 32.
+ */
+static void ptd_read(void __iomem *base, u32 ptd_offset, u32 slot,
+ struct ptd *ptd)
+{
+ reg_write32(base, HC_MEMORY_REG,
+ ISP_BANK(0) + ptd_offset + slot*sizeof(*ptd));
+ ndelay(90);
+ bank_reads8(base, ptd_offset + slot*sizeof(*ptd), ISP_BANK(0),
+ (void *) ptd, sizeof(*ptd));
+}
+
+static void ptd_write(void __iomem *base, u32 ptd_offset, u32 slot,
+ struct ptd *ptd)
+{
+ mem_writes8(base, ptd_offset + slot*sizeof(*ptd) + sizeof(ptd->dw0),
+ &ptd->dw1, 7*sizeof(ptd->dw1));
+ /* Make sure dw0 gets written last (after other dw's and after payload)
+ since it contains the enable bit */
+ wmb();
+ mem_writes8(base, ptd_offset + slot*sizeof(*ptd), &ptd->dw0,
+ sizeof(ptd->dw0));
}
+
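
ptd_write() above orders its stores deliberately: dw1..dw7 (and, earlier, the payload) go out first, wmb() makes them visible, and only then is dw0 written, since dw0 carries the bit that hands the PTD over to the controller. The raw accessors used here give no ordering guarantee on their own, hence the explicit barrier. A sketch of the same "publish last, behind a write barrier" pattern for a hypothetical descriptor (assumed field names, not the isp176x layout):

#include <linux/io.h>

/* hypothetical in-device descriptor: ctrl bit 0 marks it valid */
struct hw_desc {
	u32 ctrl; /* bit 0 = VALID; hardware may start processing once set */
	u32 addr;
	u32 len;
};

#define DESC_VALID (1u << 0)

static void desc_publish(struct hw_desc __iomem *d, u32 addr, u32 len)
{
	/* fill in every field the hardware will look at ... */
	__raw_writel(addr, &d->addr);
	__raw_writel(len, &d->len);
	/* ... make sure those stores are visible before the hand-over ... */
	wmb();
	/* ... and only then set the bit that makes the descriptor live */
	__raw_writel(DESC_VALID, &d->ctrl);
}
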
/* memory management of the 60kb on the chip from 0x1000 to 0xffff */
static void init_memory(struct isp1760_hcd *priv)
{
- int i;
- u32 payload;
+ int i, curr;
+ u32 payload_addr;
- payload = 0x1000;
+ payload_addr = PAYLOAD_OFFSET;
for (i = 0; i < BLOCK_1_NUM; i++) {
- priv->memory_pool[i].start = payload;
+ priv->memory_pool[i].start = payload_addr;
priv->memory_pool[i].size = BLOCK_1_SIZE;
priv->memory_pool[i].free = 1;
- payload += priv->memory_pool[i].size;
+ payload_addr += priv->memory_pool[i].size;
}
-
- for (i = BLOCK_1_NUM; i < BLOCK_1_NUM + BLOCK_2_NUM; i++) {
- priv->memory_pool[i].start = payload;
- priv->memory_pool[i].size = BLOCK_2_SIZE;
- priv->memory_pool[i].free = 1;
- payload += priv->memory_pool[i].size;
+ curr = i;
+ for (i = 0; i < BLOCK_2_NUM; i++) {
+ priv->memory_pool[curr + i].start = payload_addr;
+ priv->memory_pool[curr + i].size = BLOCK_2_SIZE;
+ priv->memory_pool[curr + i].free = 1;
+ payload_addr += priv->memory_pool[curr + i].size;
}
-
- for (i = BLOCK_1_NUM + BLOCK_2_NUM; i < BLOCKS; i++) {
- priv->memory_pool[i].start = payload;
- priv->memory_pool[i].size = BLOCK_3_SIZE;
- priv->memory_pool[i].free = 1;
- payload += priv->memory_pool[i].size;
+ curr = i;
+ for (i = 0; i < BLOCK_3_NUM; i++) {
+ priv->memory_pool[curr + i].start = payload_addr;
+ priv->memory_pool[curr + i].size = BLOCK_3_SIZE;
+ priv->memory_pool[curr + i].free = 1;
+ payload_addr += priv->memory_pool[curr + i].size;
}
- BUG_ON(payload - priv->memory_pool[i - 1].size > PAYLOAD_SIZE);
+ BUG_ON(payload_addr - priv->memory_pool[0].start > PAYLOAD_AREA_SIZE);
}
-static u32 alloc_mem(struct isp1760_hcd *priv, u32 size)
+static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
int i;
- if (!size)
- return ISP1760_NULL_POINTER;
+ BUG_ON(qtd->payload_addr);
+
+ if (!qtd->length)
+ return;
for (i = 0; i < BLOCKS; i++) {
- if (priv->memory_pool[i].size >= size &&
+ if (priv->memory_pool[i].size >= qtd->length &&
priv->memory_pool[i].free) {
-
priv->memory_pool[i].free = 0;
- return priv->memory_pool[i].start;
+ qtd->payload_addr = priv->memory_pool[i].start;
+ return;
}
}
- printk(KERN_ERR "ISP1760 MEM: can not allocate %d bytes of memory\n",
- size);
- printk(KERN_ERR "Current memory map:\n");
+ dev_err(hcd->self.controller,
+ "%s: Can not allocate %lu bytes of memory\n"
+ "Current memory map:\n",
+ __func__, qtd->length);
for (i = 0; i < BLOCKS; i++) {
- printk(KERN_ERR "Pool %2d size %4d status: %d\n",
+ dev_err(hcd->self.controller, "Pool %2d size %4d status: %d\n",
i, priv->memory_pool[i].size,
priv->memory_pool[i].free);
}
/* XXX maybe -ENOMEM could be possible */
BUG();
- return 0;
+ return;
}
-static void free_mem(struct isp1760_hcd *priv, u32 mem)
+static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
int i;
- if (mem == ISP1760_NULL_POINTER)
+ if (!qtd->payload_addr)
return;
for (i = 0; i < BLOCKS; i++) {
- if (priv->memory_pool[i].start == mem) {
-
+ if (priv->memory_pool[i].start == qtd->payload_addr) {
BUG_ON(priv->memory_pool[i].free);
-
priv->memory_pool[i].free = 1;
- return ;
+ qtd->payload_addr = 0;
+ return;
}
}
- printk(KERN_ERR "Trying to free not-here-allocated memory :%08x\n",
- mem);
+ dev_err(hcd->self.controller, "%s: Invalid pointer: %08x\n",
+ __func__, qtd->payload_addr);
BUG();
}
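
init_memory(), alloc_mem() and free_mem() above manage the chip's on-board payload area as a fixed table of pre-sized blocks in three sizes and always hand out the first free block that is large enough; there is no splitting or coalescing, and freeing simply flips the block back to free. A compact standalone sketch of that first-fit policy (hypothetical block counts and sizes, not the isp176x layout):

#include <stdbool.h>
#include <stdint.h>

#define NBLOCKS 8

static const uint32_t block_size[NBLOCKS] = {
	256, 256, 256, 256, 1024, 1024, 4096, 4096
};

struct pool_block {
	uint32_t start; /* offset into the device payload area */
	uint32_t size;
	bool free;
};

static struct pool_block pool[NBLOCKS];

/* carve the payload area (starting at a non-zero offset) into blocks */
static void pool_init(uint32_t payload_base)
{
	uint32_t addr = payload_base;
	int i;

	for (i = 0; i < NBLOCKS; i++) {
		pool[i].start = addr;
		pool[i].size = block_size[i];
		pool[i].free = true;
		addr += block_size[i];
	}
}

/* first fit: return the offset of the first free block big enough for
 * 'len', or 0 if nothing fits (0 is never a valid offset here) */
static uint32_t pool_alloc(uint32_t len)
{
	int i;

	for (i = 0; i < NBLOCKS; i++) {
		if (pool[i].free && pool[i].size >= len) {
			pool[i].free = false;
			return pool[i].start;
		}
	}
	return 0;
}

static void pool_free(uint32_t start)
{
	int i;

	for (i = 0; i < NBLOCKS; i++) {
		if (pool[i].start == start) {
			pool[i].free = true;
			return;
		}
	}
}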
static void isp1760_init_regs(struct usb_hcd *hcd)
{
- isp1760_writel(0, hcd->regs + HC_BUFFER_STATUS_REG);
- isp1760_writel(NO_TRANSFER_ACTIVE, hcd->regs +
- HC_ATL_PTD_SKIPMAP_REG);
- isp1760_writel(NO_TRANSFER_ACTIVE, hcd->regs +
- HC_INT_PTD_SKIPMAP_REG);
- isp1760_writel(NO_TRANSFER_ACTIVE, hcd->regs +
- HC_ISO_PTD_SKIPMAP_REG);
-
- isp1760_writel(~NO_TRANSFER_ACTIVE, hcd->regs +
- HC_ATL_PTD_DONEMAP_REG);
- isp1760_writel(~NO_TRANSFER_ACTIVE, hcd->regs +
- HC_INT_PTD_DONEMAP_REG);
- isp1760_writel(~NO_TRANSFER_ACTIVE, hcd->regs +
- HC_ISO_PTD_DONEMAP_REG);
+ reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0);
+ reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
+ reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
+ reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
+
+ reg_write32(hcd->regs, HC_ATL_PTD_DONEMAP_REG, ~NO_TRANSFER_ACTIVE);
+ reg_write32(hcd->regs, HC_INT_PTD_DONEMAP_REG, ~NO_TRANSFER_ACTIVE);
+ reg_write32(hcd->regs, HC_ISO_PTD_DONEMAP_REG, ~NO_TRANSFER_ACTIVE);
}
-static int handshake(struct isp1760_hcd *priv, void __iomem *ptr,
+static int handshake(struct usb_hcd *hcd, u32 reg,
u32 mask, u32 done, int usec)
{
u32 result;
do {
- result = isp1760_readl(ptr);
+ result = reg_read32(hcd->regs, reg);
if (result == ~0)
return -ENODEV;
result &= mask;
@@ -304,17 +361,18 @@ static int handshake(struct isp1760_hcd *priv, void __iomem *ptr,
}
/* reset a non-running (STS_HALT == 1) controller */
-static int ehci_reset(struct isp1760_hcd *priv)
+static int ehci_reset(struct usb_hcd *hcd)
{
int retval;
- struct usb_hcd *hcd = priv_to_hcd(priv);
- u32 command = isp1760_readl(hcd->regs + HC_USBCMD);
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ u32 command = reg_read32(hcd->regs, HC_USBCMD);
command |= CMD_RESET;
- isp1760_writel(command, hcd->regs + HC_USBCMD);
+ reg_write32(hcd->regs, HC_USBCMD, command);
hcd->state = HC_STATE_HALT;
priv->next_statechange = jiffies;
- retval = handshake(priv, hcd->regs + HC_USBCMD,
+ retval = handshake(hcd, HC_USBCMD,
CMD_RESET, 0, 250 * 1000);
return retval;
}
@@ -325,8 +383,7 @@ static void qh_destroy(struct isp1760_qh *qh)
kmem_cache_free(qh_cachep, qh);
}
-static struct isp1760_qh *isp1760_qh_alloc(struct isp1760_hcd *priv,
- gfp_t flags)
+static struct isp1760_qh *isp1760_qh_alloc(gfp_t flags)
{
struct isp1760_qh *qh;
@@ -335,7 +392,6 @@ static struct isp1760_qh *isp1760_qh_alloc(struct isp1760_hcd *priv,
return qh;
INIT_LIST_HEAD(&qh->qtd_list);
- qh->priv = priv;
return qh;
}
@@ -362,7 +418,7 @@ static int priv_init(struct usb_hcd *hcd)
priv->periodic_size = DEFAULT_I_TDPS;
/* controllers may cache some of the periodic schedule ... */
- hcc_params = isp1760_readl(hcd->regs + HC_HCCPARAMS);
+ hcc_params = reg_read32(hcd->regs, HC_HCCPARAMS);
/* full frame cache */
if (HCC_ISOC_CACHE(hcc_params))
priv->i_thresh = 8;
@@ -399,15 +455,15 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
* Write it twice to ensure correct upper bits if switching
* to 16-bit mode.
*/
- isp1760_writel(hwmode, hcd->regs + HC_HW_MODE_CTRL);
- isp1760_writel(hwmode, hcd->regs + HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
- isp1760_writel(0xdeadbabe, hcd->regs + HC_SCRATCH_REG);
+ reg_write32(hcd->regs, HC_SCRATCH_REG, 0xdeadbabe);
/* Change bus pattern */
- scratch = isp1760_readl(hcd->regs + HC_CHIP_ID_REG);
- scratch = isp1760_readl(hcd->regs + HC_SCRATCH_REG);
+ scratch = reg_read32(hcd->regs, HC_CHIP_ID_REG);
+ scratch = reg_read32(hcd->regs, HC_SCRATCH_REG);
if (scratch != 0xdeadbabe) {
- printk(KERN_ERR "ISP1760: Scratch test failed.\n");
+ dev_err(hcd->self.controller, "Scratch test failed.\n");
return -ENODEV;
}
@@ -415,30 +471,30 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
isp1760_init_regs(hcd);
/* reset */
- isp1760_writel(SW_RESET_RESET_ALL, hcd->regs + HC_RESET_REG);
+ reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_ALL);
mdelay(100);
- isp1760_writel(SW_RESET_RESET_HC, hcd->regs + HC_RESET_REG);
+ reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_HC);
mdelay(100);
- result = ehci_reset(priv);
+ result = ehci_reset(hcd);
if (result)
return result;
/* Step 11 passed */
- isp1760_info(priv, "bus width: %d, oc: %s\n",
+ dev_info(hcd->self.controller, "bus width: %d, oc: %s\n",
(priv->devflags & ISP1760_FLAG_BUS_WIDTH_16) ?
16 : 32, (priv->devflags & ISP1760_FLAG_ANALOG_OC) ?
"analog" : "digital");
/* ATL reset */
- isp1760_writel(hwmode | ALL_ATX_RESET, hcd->regs + HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET);
mdelay(10);
- isp1760_writel(hwmode, hcd->regs + HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
- isp1760_writel(INTERRUPT_ENABLE_MASK, hcd->regs + HC_INTERRUPT_REG);
- isp1760_writel(INTERRUPT_ENABLE_MASK, hcd->regs + HC_INTERRUPT_ENABLE);
+ reg_write32(hcd->regs, HC_INTERRUPT_REG, INTERRUPT_ENABLE_MASK);
+ reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK);
/*
* PORT 1 Control register of the ISP1760 is the OTG control
@@ -446,11 +502,10 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
* support in this driver, we use port 1 as a "normal" USB host port on
* both chips.
*/
- isp1760_writel(PORT1_POWER | PORT1_INIT2,
- hcd->regs + HC_PORT1_CTRL);
+ reg_write32(hcd->regs, HC_PORT1_CTRL, PORT1_POWER | PORT1_INIT2);
mdelay(10);
- priv->hcs_params = isp1760_readl(hcd->regs + HC_HCSPARAMS);
+ priv->hcs_params = reg_read32(hcd->regs, HC_HCSPARAMS);
return priv_init(hcd);
}
@@ -458,25 +513,24 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
static void isp1760_init_maps(struct usb_hcd *hcd)
{
 	/* set last maps; for ISO it's only 1, else a 32-TD bitmap */
- isp1760_writel(0x80000000, hcd->regs + HC_ATL_PTD_LASTPTD_REG);
- isp1760_writel(0x80000000, hcd->regs + HC_INT_PTD_LASTPTD_REG);
- isp1760_writel(0x00000001, hcd->regs + HC_ISO_PTD_LASTPTD_REG);
+ reg_write32(hcd->regs, HC_ATL_PTD_LASTPTD_REG, 0x80000000);
+ reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000);
+ reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001);
}
static void isp1760_enable_interrupts(struct usb_hcd *hcd)
{
- isp1760_writel(0, hcd->regs + HC_ATL_IRQ_MASK_AND_REG);
- isp1760_writel(0, hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
- isp1760_writel(0, hcd->regs + HC_INT_IRQ_MASK_AND_REG);
- isp1760_writel(0, hcd->regs + HC_INT_IRQ_MASK_OR_REG);
- isp1760_writel(0, hcd->regs + HC_ISO_IRQ_MASK_AND_REG);
- isp1760_writel(0xffffffff, hcd->regs + HC_ISO_IRQ_MASK_OR_REG);
+ reg_write32(hcd->regs, HC_ATL_IRQ_MASK_AND_REG, 0);
+ reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, 0);
+ reg_write32(hcd->regs, HC_INT_IRQ_MASK_AND_REG, 0);
+ reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, 0);
+ reg_write32(hcd->regs, HC_ISO_IRQ_MASK_AND_REG, 0);
+ reg_write32(hcd->regs, HC_ISO_IRQ_MASK_OR_REG, 0xffffffff);
/* step 23 passed */
}
static int isp1760_run(struct usb_hcd *hcd)
{
- struct isp1760_hcd *priv = hcd_to_priv(hcd);
int retval;
u32 temp;
u32 command;
@@ -486,15 +540,15 @@ static int isp1760_run(struct usb_hcd *hcd)
hcd->state = HC_STATE_RUNNING;
isp1760_enable_interrupts(hcd);
- temp = isp1760_readl(hcd->regs + HC_HW_MODE_CTRL);
- isp1760_writel(temp | HW_GLOBAL_INTR_EN, hcd->regs + HC_HW_MODE_CTRL);
+ temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp | HW_GLOBAL_INTR_EN);
- command = isp1760_readl(hcd->regs + HC_USBCMD);
+ command = reg_read32(hcd->regs, HC_USBCMD);
command &= ~(CMD_LRESET|CMD_RESET);
command |= CMD_RUN;
- isp1760_writel(command, hcd->regs + HC_USBCMD);
+ reg_write32(hcd->regs, HC_USBCMD, command);
- retval = handshake(priv, hcd->regs + HC_USBCMD, CMD_RUN, CMD_RUN,
+ retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN,
250 * 1000);
if (retval)
return retval;
@@ -505,17 +559,16 @@ static int isp1760_run(struct usb_hcd *hcd)
* the semaphore while doing so.
*/
down_write(&ehci_cf_port_reset_rwsem);
- isp1760_writel(FLAG_CF, hcd->regs + HC_CONFIGFLAG);
+ reg_write32(hcd->regs, HC_CONFIGFLAG, FLAG_CF);
- retval = handshake(priv, hcd->regs + HC_CONFIGFLAG, FLAG_CF, FLAG_CF,
- 250 * 1000);
+ retval = handshake(hcd, HC_CONFIGFLAG, FLAG_CF, FLAG_CF, 250 * 1000);
up_write(&ehci_cf_port_reset_rwsem);
if (retval)
return retval;
- chipid = isp1760_readl(hcd->regs + HC_CHIP_ID_REG);
- isp1760_info(priv, "USB ISP %04x HW rev. %d started\n", chipid & 0xffff,
- chipid >> 16);
+ chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG);
+ dev_info(hcd->self.controller, "USB ISP %04x HW rev. %d started\n",
+ chipid & 0xffff, chipid >> 16);
/* PTD Register Init Part 2, Step 28 */
/* enable INTs */
@@ -533,160 +586,156 @@ static u32 base_to_chip(u32 base)
return ((base - 0x400) >> 3);
}
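/*
 * Worked example of base_to_chip(): a payload block at CPU-visible
 * offset 0x1000 is programmed into the PTD as
 * (0x1000 - 0x400) >> 3 = 0x180, i.e. the chip appears to address
 * payload memory in 8-byte units relative to the 0x400 base (the
 * interpretation of the constants is assumed; the arithmetic follows
 * directly from the code above).
 */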
-static void transform_into_atl(struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct isp1760_qtd *qtd, struct urb *urb,
- u32 payload, struct ptd *ptd)
+static int last_qtd_of_urb(struct isp1760_qtd *qtd, struct isp1760_qh *qh)
+{
+ struct urb *urb;
+
+ if (list_is_last(&qtd->qtd_list, &qh->qtd_list))
+ return 1;
+
+ urb = qtd->urb;
+ qtd = list_entry(qtd->qtd_list.next, typeof(*qtd), qtd_list);
+ return (qtd->urb != urb);
+}
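/*
 * Example for last_qtd_of_urb(): with qh->qtd_list = [A1, A2, B1],
 * where A1/A2 belong to URB A and B1 to URB B, the function returns 0
 * for A1 (A2 follows and shares the URB), 1 for A2 (the next entry
 * belongs to another URB) and 1 for B1 (tail of the list).
 */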
+
+static void transform_into_atl(struct isp1760_qh *qh,
+ struct isp1760_qtd *qtd, struct ptd *ptd)
{
- u32 dw0;
- u32 dw1;
- u32 dw2;
- u32 dw3;
u32 maxpacket;
u32 multi;
u32 pid_code;
u32 rl = RL_COUNTER;
u32 nak = NAK_COUNTER;
+ memset(ptd, 0, sizeof(*ptd));
+
/* according to 3.6.2, max packet len can not be > 0x400 */
- maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+ maxpacket = usb_maxpacket(qtd->urb->dev, qtd->urb->pipe,
+ usb_pipeout(qtd->urb->pipe));
multi = 1 + ((maxpacket >> 11) & 0x3);
maxpacket &= 0x7ff;
/* DW0 */
- dw0 = PTD_VALID;
- dw0 |= PTD_LENGTH(qtd->length);
- dw0 |= PTD_MAXPACKET(maxpacket);
- dw0 |= PTD_ENDPOINT(usb_pipeendpoint(urb->pipe));
- dw1 = usb_pipeendpoint(urb->pipe) >> 1;
+ ptd->dw0 = PTD_VALID;
+ ptd->dw0 |= PTD_LENGTH(qtd->length);
+ ptd->dw0 |= PTD_MAXPACKET(maxpacket);
+ ptd->dw0 |= PTD_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe));
/* DW1 */
- dw1 |= PTD_DEVICE_ADDR(usb_pipedevice(urb->pipe));
+ ptd->dw1 = usb_pipeendpoint(qtd->urb->pipe) >> 1;
+ ptd->dw1 |= PTD_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe));
pid_code = qtd->packet_type;
- dw1 |= PTD_PID_TOKEN(pid_code);
+ ptd->dw1 |= PTD_PID_TOKEN(pid_code);
- if (usb_pipebulk(urb->pipe))
- dw1 |= PTD_TRANS_BULK;
- else if (usb_pipeint(urb->pipe))
- dw1 |= PTD_TRANS_INT;
+ if (usb_pipebulk(qtd->urb->pipe))
+ ptd->dw1 |= PTD_TRANS_BULK;
+ else if (usb_pipeint(qtd->urb->pipe))
+ ptd->dw1 |= PTD_TRANS_INT;
- if (urb->dev->speed != USB_SPEED_HIGH) {
+ if (qtd->urb->dev->speed != USB_SPEED_HIGH) {
/* split transaction */
- dw1 |= PTD_TRANS_SPLIT;
- if (urb->dev->speed == USB_SPEED_LOW)
- dw1 |= PTD_SE_USB_LOSPEED;
+ ptd->dw1 |= PTD_TRANS_SPLIT;
+ if (qtd->urb->dev->speed == USB_SPEED_LOW)
+ ptd->dw1 |= PTD_SE_USB_LOSPEED;
- dw1 |= PTD_PORT_NUM(urb->dev->ttport);
- dw1 |= PTD_HUB_NUM(urb->dev->tt->hub->devnum);
+ ptd->dw1 |= PTD_PORT_NUM(qtd->urb->dev->ttport);
+ ptd->dw1 |= PTD_HUB_NUM(qtd->urb->dev->tt->hub->devnum);
/* SE bit for Split INT transfers */
- if (usb_pipeint(urb->pipe) &&
- (urb->dev->speed == USB_SPEED_LOW))
- dw1 |= 2 << 16;
+ if (usb_pipeint(qtd->urb->pipe) &&
+ (qtd->urb->dev->speed == USB_SPEED_LOW))
+ ptd->dw1 |= 2 << 16;
- dw3 = 0;
+ ptd->dw3 = 0;
rl = 0;
nak = 0;
} else {
- dw0 |= PTD_MULTI(multi);
- if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe))
- dw3 = qh->ping;
+ ptd->dw0 |= PTD_MULTI(multi);
+ if (usb_pipecontrol(qtd->urb->pipe) ||
+ usb_pipebulk(qtd->urb->pipe))
+ ptd->dw3 = qh->ping;
else
- dw3 = 0;
+ ptd->dw3 = 0;
}
/* DW2 */
- dw2 = 0;
- dw2 |= PTD_DATA_START_ADDR(base_to_chip(payload));
- dw2 |= PTD_RL_CNT(rl);
- dw3 |= PTD_NAC_CNT(nak);
+ ptd->dw2 = 0;
+ ptd->dw2 |= PTD_DATA_START_ADDR(base_to_chip(qtd->payload_addr));
+ ptd->dw2 |= PTD_RL_CNT(rl);
+ ptd->dw3 |= PTD_NAC_CNT(nak);
/* DW3 */
- if (usb_pipecontrol(urb->pipe))
- dw3 |= PTD_DATA_TOGGLE(qtd->toggle);
- else
- dw3 |= qh->toggle;
-
+ ptd->dw3 |= qh->toggle;
+ if (usb_pipecontrol(qtd->urb->pipe)) {
+ if (qtd->data_buffer == qtd->urb->setup_packet)
+ ptd->dw3 &= ~PTD_DATA_TOGGLE(1);
+ else if (last_qtd_of_urb(qtd, qh))
+ ptd->dw3 |= PTD_DATA_TOGGLE(1);
+ }
- dw3 |= PTD_ACTIVE;
+ ptd->dw3 |= PTD_ACTIVE;
/* Cerr */
- dw3 |= PTD_CERR(ERR_COUNTER);
-
- memset(ptd, 0, sizeof(*ptd));
-
- ptd->dw0 = cpu_to_le32(dw0);
- ptd->dw1 = cpu_to_le32(dw1);
- ptd->dw2 = cpu_to_le32(dw2);
- ptd->dw3 = cpu_to_le32(dw3);
+ ptd->dw3 |= PTD_CERR(ERR_COUNTER);
}
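/*
 * Summary of the control-transfer toggle handling above: the SETUP
 * stage (data_buffer == urb->setup_packet) is forced to DATA0, the
 * final qtd of the URB (the STATUS stage) is forced to DATA1, and any
 * DATA stage qtds in between inherit qh->toggle, which the completion
 * paths refresh from bit 25 of dw3 of the finished PTD.
 */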
-static void transform_add_int(struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct isp1760_qtd *qtd, struct urb *urb,
- u32 payload, struct ptd *ptd)
+static void transform_add_int(struct isp1760_qh *qh,
+ struct isp1760_qtd *qtd, struct ptd *ptd)
{
- u32 maxpacket;
- u32 multi;
- u32 numberofusofs;
- u32 i;
- u32 usofmask, usof;
+ u32 usof;
u32 period;
- maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
- multi = 1 + ((maxpacket >> 11) & 0x3);
- maxpacket &= 0x7ff;
- /* length of the data per uframe */
- maxpacket = multi * maxpacket;
-
- numberofusofs = urb->transfer_buffer_length / maxpacket;
- if (urb->transfer_buffer_length % maxpacket)
- numberofusofs += 1;
-
- usofmask = 1;
- usof = 0;
- for (i = 0; i < numberofusofs; i++) {
- usof |= usofmask;
- usofmask <<= 1;
- }
-
- if (urb->dev->speed != USB_SPEED_HIGH) {
- /* split */
- ptd->dw5 = cpu_to_le32(0x1c);
+ /*
+ * Most of this is guessing. ISP1761 datasheet is quite unclear, and
+ * the algorithm from the original Philips driver code, which was
+ * pretty much used in this driver before as well, is quite horrendous
+	 * and, I believe, incorrect. The code below follows the datasheet and
+ * USB2.0 spec as far as I can tell, and plug/unplug seems to be much
+ * more reliable this way (fingers crossed...).
+ */
- if (qh->period >= 32)
- period = qh->period / 2;
+ if (qtd->urb->dev->speed == USB_SPEED_HIGH) {
+ /* urb->interval is in units of microframes (1/8 ms) */
+ period = qtd->urb->interval >> 3;
+
+ if (qtd->urb->interval > 4)
+ usof = 0x01; /* One bit set =>
+ interval 1 ms * uFrame-match */
+ else if (qtd->urb->interval > 2)
+ usof = 0x22; /* Two bits set => interval 1/2 ms */
+ else if (qtd->urb->interval > 1)
+ usof = 0x55; /* Four bits set => interval 1/4 ms */
else
- period = qh->period;
-
+ usof = 0xff; /* All bits set => interval 1/8 ms */
} else {
+ /* urb->interval is in units of frames (1 ms) */
+ period = qtd->urb->interval;
+		usof = 0x0f;	/* Execute Start Split on any of the
+				   first four uFrames */
- if (qh->period >= 8)
- period = qh->period/8;
- else
- period = qh->period;
-
- if (period >= 32)
- period = 16;
-
- if (qh->period >= 8) {
- /* millisecond period */
- period = (period << 3);
- } else {
- /* usof based tranmsfers */
- /* minimum 4 usofs */
- usof = 0x11;
- }
+ /*
+		 * The first 8 bits in dw5 are uSCS and "specifies which uSOF the
+		 * complete split needs to be sent. Valid only for IN." Also,
+		 * "All bits can be set to one for every transfer." (p 82,
+		 * ISP1761 data sheet.) 0x1c is from the Philips driver. Where did
+		 * that number come from? 0xff seems to work fine...
+ */
+ /* ptd->dw5 = 0x1c; */
+ ptd->dw5 = 0xff; /* Execute Complete Split on any uFrame */
}
- ptd->dw2 |= cpu_to_le32(period);
- ptd->dw4 = cpu_to_le32(usof);
+	period = period >> 1;	/* Ensure equal or shorter period than requested */
+ period &= 0xf8; /* Mask off too large values and lowest unused 3 bits */
+
+ ptd->dw2 |= period;
+ ptd->dw4 = usof;
}
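/*
 * Worked example of the scheduling math above: a full-speed interrupt
 * endpoint with urb->interval = 32 frames gets period = 32,
 * usof = 0x0f and dw5 = 0xff; after "period >> 1" and the 0xf8 mask,
 * 0x10 is or'ed into dw2.  A high-speed endpoint with
 * urb->interval = 256 microframes (32 ms) gets period = 256 >> 3 = 32
 * and usof = 0x01, again yielding 0x10 in dw2.  Intervals below
 * 16 frames end up with a dw2 period of 0 because of the 0xf8 mask,
 * i.e. the PTD is considered every frame.
 */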
-static void transform_into_int(struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct isp1760_qtd *qtd, struct urb *urb,
- u32 payload, struct ptd *ptd)
+static void transform_into_int(struct isp1760_qh *qh,
+ struct isp1760_qtd *qtd, struct ptd *ptd)
{
- transform_into_atl(priv, qh, qtd, urb, payload, ptd);
- transform_add_int(priv, qh, qtd, urb, payload, ptd);
+ transform_into_atl(qh, qtd, ptd);
+ transform_add_int(qh, qtd, ptd);
}
static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len,
@@ -696,10 +745,9 @@ static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len,
qtd->data_buffer = databuffer;
qtd->packet_type = GET_QTD_TOKEN_TYPE(token);
- qtd->toggle = GET_DATA_TOGGLE(token);
- if (len > HC_ATL_PL_SIZE)
- count = HC_ATL_PL_SIZE;
+ if (len > MAX_PAYLOAD_SIZE)
+ count = MAX_PAYLOAD_SIZE;
else
count = len;
@@ -707,29 +755,27 @@ static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len,
return count;
}
-static int check_error(struct ptd *ptd)
+static int check_error(struct usb_hcd *hcd, struct ptd *ptd)
{
int error = 0;
- u32 dw3;
- dw3 = le32_to_cpu(ptd->dw3);
- if (dw3 & DW3_HALT_BIT) {
+ if (ptd->dw3 & DW3_HALT_BIT) {
error = -EPIPE;
- if (dw3 & DW3_ERROR_BIT)
+ if (ptd->dw3 & DW3_ERROR_BIT)
pr_err("error bit is set in DW3\n");
}
- if (dw3 & DW3_QTD_ACTIVE) {
- printk(KERN_ERR "transfer active bit is set DW3\n");
- printk(KERN_ERR "nak counter: %d, rl: %d\n", (dw3 >> 19) & 0xf,
- (le32_to_cpu(ptd->dw2) >> 25) & 0xf);
+ if (ptd->dw3 & DW3_QTD_ACTIVE) {
+		dev_err(hcd->self.controller, "Transfer active bit is set in DW3\n"
+ "nak counter: %d, rl: %d\n",
+ (ptd->dw3 >> 19) & 0xf, (ptd->dw2 >> 25) & 0xf);
}
return error;
}
-static void check_int_err_status(u32 dw4)
+static void check_int_err_status(struct usb_hcd *hcd, u32 dw4)
{
u32 i;
@@ -738,79 +784,67 @@ static void check_int_err_status(u32 dw4)
for (i = 0; i < 8; i++) {
switch (dw4 & 0x7) {
case INT_UNDERRUN:
- printk(KERN_ERR "ERROR: under run , %d\n", i);
+ dev_err(hcd->self.controller, "Underrun (%d)\n", i);
break;
case INT_EXACT:
- printk(KERN_ERR "ERROR: transaction error, %d\n", i);
+ dev_err(hcd->self.controller,
+ "Transaction error (%d)\n", i);
break;
case INT_BABBLE:
- printk(KERN_ERR "ERROR: babble error, %d\n", i);
+ dev_err(hcd->self.controller, "Babble error (%d)\n", i);
break;
}
dw4 >>= 3;
}
}
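/*
 * dw4 layout as consumed by the loop above: eight 3-bit status fields,
 * one per microframe -- bits [2:0] describe uFrame 0, bits [5:3]
 * uFrame 1, and so on up to bits [23:21] for uFrame 7; each field is
 * compared against INT_UNDERRUN, INT_EXACT and INT_BABBLE in turn.
 */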
-static void enqueue_one_qtd(struct isp1760_qtd *qtd, struct isp1760_hcd *priv,
- u32 payload)
+static void enqueue_one_qtd(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
{
- u32 token;
- struct usb_hcd *hcd = priv_to_hcd(priv);
-
- token = qtd->packet_type;
-
- if (qtd->length && (qtd->length <= HC_ATL_PL_SIZE)) {
- switch (token) {
+ if (qtd->length && (qtd->length <= MAX_PAYLOAD_SIZE)) {
+ switch (qtd->packet_type) {
case IN_PID:
break;
case OUT_PID:
case SETUP_PID:
- priv_write_copy(priv, qtd->data_buffer,
- hcd->regs + payload,
- qtd->length);
+ mem_writes8(hcd->regs, qtd->payload_addr,
+ qtd->data_buffer, qtd->length);
}
}
}
-static void enqueue_one_atl_qtd(u32 atl_regs, u32 payload,
- struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct urb *urb, u32 slot, struct isp1760_qtd *qtd)
+static void enqueue_one_atl_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh,
+ u32 slot, struct isp1760_qtd *qtd)
{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
struct ptd ptd;
- struct usb_hcd *hcd = priv_to_hcd(priv);
- transform_into_atl(priv, qh, qtd, urb, payload, &ptd);
- priv_write_copy(priv, (u32 *)&ptd, hcd->regs + atl_regs, sizeof(ptd));
- enqueue_one_qtd(qtd, priv, payload);
+ alloc_mem(hcd, qtd);
+ transform_into_atl(qh, qtd, &ptd);
+ ptd_write(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
+ enqueue_one_qtd(hcd, qtd);
- priv->atl_ints[slot].urb = urb;
priv->atl_ints[slot].qh = qh;
priv->atl_ints[slot].qtd = qtd;
- priv->atl_ints[slot].data_buffer = qtd->data_buffer;
- priv->atl_ints[slot].payload = payload;
- qtd->status |= URB_ENQUEUED | URB_TYPE_ATL;
+ qtd->status |= URB_ENQUEUED;
qtd->status |= slot << 16;
}
-static void enqueue_one_int_qtd(u32 int_regs, u32 payload,
- struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct urb *urb, u32 slot, struct isp1760_qtd *qtd)
+static void enqueue_one_int_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh,
+ u32 slot, struct isp1760_qtd *qtd)
{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
struct ptd ptd;
- struct usb_hcd *hcd = priv_to_hcd(priv);
- transform_into_int(priv, qh, qtd, urb, payload, &ptd);
- priv_write_copy(priv, (u32 *)&ptd, hcd->regs + int_regs, sizeof(ptd));
- enqueue_one_qtd(qtd, priv, payload);
+ alloc_mem(hcd, qtd);
+ transform_into_int(qh, qtd, &ptd);
+ ptd_write(hcd->regs, INT_PTD_OFFSET, slot, &ptd);
+ enqueue_one_qtd(hcd, qtd);
- priv->int_ints[slot].urb = urb;
priv->int_ints[slot].qh = qh;
priv->int_ints[slot].qtd = qtd;
- priv->int_ints[slot].data_buffer = qtd->data_buffer;
- priv->int_ints[slot].payload = payload;
- qtd->status |= URB_ENQUEUED | URB_TYPE_INT;
+ qtd->status |= URB_ENQUEUED;
qtd->status |= slot << 16;
}
@@ -819,9 +853,7 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 skip_map, or_map;
- u32 queue_entry;
u32 slot;
- u32 atl_regs, payload;
u32 buffstatus;
/*
@@ -832,43 +864,35 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
*/
mmiowb();
ndelay(195);
- skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
+ skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
BUG_ON(!skip_map);
slot = __ffs(skip_map);
- queue_entry = 1 << slot;
-
- atl_regs = ATL_REGS_OFFSET + slot * sizeof(struct ptd);
- payload = alloc_mem(priv, qtd->length);
+ enqueue_one_atl_qtd(hcd, qh, slot, qtd);
- enqueue_one_atl_qtd(atl_regs, payload, priv, qh, qtd->urb, slot, qtd);
+ or_map = reg_read32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG);
+ or_map |= (1 << slot);
+ reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, or_map);
- or_map = isp1760_readl(hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
- or_map |= queue_entry;
- isp1760_writel(or_map, hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
-
- skip_map &= ~queue_entry;
- isp1760_writel(skip_map, hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
+ skip_map &= ~(1 << slot);
+ reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
priv->atl_queued++;
if (priv->atl_queued == 2)
- isp1760_writel(INTERRUPT_ENABLE_SOT_MASK,
- hcd->regs + HC_INTERRUPT_ENABLE);
+ reg_write32(hcd->regs, HC_INTERRUPT_ENABLE,
+ INTERRUPT_ENABLE_SOT_MASK);
- buffstatus = isp1760_readl(hcd->regs + HC_BUFFER_STATUS_REG);
+ buffstatus = reg_read32(hcd->regs, HC_BUFFER_STATUS_REG);
buffstatus |= ATL_BUFFER;
- isp1760_writel(buffstatus, hcd->regs + HC_BUFFER_STATUS_REG);
+ reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, buffstatus);
}
static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
struct isp1760_qtd *qtd)
{
- struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 skip_map, or_map;
- u32 queue_entry;
u32 slot;
- u32 int_regs, payload;
u32 buffstatus;
/*
@@ -879,37 +903,34 @@ static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
*/
mmiowb();
ndelay(195);
- skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG);
+ skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
BUG_ON(!skip_map);
slot = __ffs(skip_map);
- queue_entry = 1 << slot;
- int_regs = INT_REGS_OFFSET + slot * sizeof(struct ptd);
+ enqueue_one_int_qtd(hcd, qh, slot, qtd);
- payload = alloc_mem(priv, qtd->length);
+ or_map = reg_read32(hcd->regs, HC_INT_IRQ_MASK_OR_REG);
+ or_map |= (1 << slot);
+ reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, or_map);
- enqueue_one_int_qtd(int_regs, payload, priv, qh, qtd->urb, slot, qtd);
+ skip_map &= ~(1 << slot);
+ reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
- or_map = isp1760_readl(hcd->regs + HC_INT_IRQ_MASK_OR_REG);
- or_map |= queue_entry;
- isp1760_writel(or_map, hcd->regs + HC_INT_IRQ_MASK_OR_REG);
-
- skip_map &= ~queue_entry;
- isp1760_writel(skip_map, hcd->regs + HC_INT_PTD_SKIPMAP_REG);
-
- buffstatus = isp1760_readl(hcd->regs + HC_BUFFER_STATUS_REG);
+ buffstatus = reg_read32(hcd->regs, HC_BUFFER_STATUS_REG);
buffstatus |= INT_BUFFER;
- isp1760_writel(buffstatus, hcd->regs + HC_BUFFER_STATUS_REG);
+ reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, buffstatus);
}
-static void isp1760_urb_done(struct isp1760_hcd *priv, struct urb *urb, int status)
+static void isp1760_urb_done(struct usb_hcd *hcd, struct urb *urb)
__releases(priv->lock)
__acquires(priv->lock)
{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
if (!urb->unlinked) {
- if (status == -EINPROGRESS)
- status = 0;
+ if (urb->status == -EINPROGRESS)
+ urb->status = 0;
}
if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
@@ -921,22 +942,28 @@ __acquires(priv->lock)
}
/* complete() can reenter this HCD */
- usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock(&priv->lock);
- usb_hcd_giveback_urb(priv_to_hcd(priv), urb, status);
+ usb_hcd_giveback_urb(hcd, urb, urb->status);
spin_lock(&priv->lock);
}
static void isp1760_qtd_free(struct isp1760_qtd *qtd)
{
+ BUG_ON(qtd->payload_addr);
kmem_cache_free(qtd_cachep, qtd);
}
-static struct isp1760_qtd *clean_this_qtd(struct isp1760_qtd *qtd)
+static struct isp1760_qtd *clean_this_qtd(struct isp1760_qtd *qtd,
+ struct isp1760_qh *qh)
{
struct isp1760_qtd *tmp_qtd;
- tmp_qtd = qtd->hw_next;
+ if (list_is_last(&qtd->qtd_list, &qh->qtd_list))
+ tmp_qtd = NULL;
+ else
+ tmp_qtd = list_entry(qtd->qtd_list.next, struct isp1760_qtd,
+ qtd_list);
list_del(&qtd->qtd_list);
isp1760_qtd_free(qtd);
return tmp_qtd;
@@ -947,32 +974,26 @@ static struct isp1760_qtd *clean_this_qtd(struct isp1760_qtd *qtd)
 * isn't the last one, then also remove its successor(s).
 * Returns the QTD which is part of a new URB and should be enqueued.
*/
-static struct isp1760_qtd *clean_up_qtdlist(struct isp1760_qtd *qtd)
+static struct isp1760_qtd *clean_up_qtdlist(struct isp1760_qtd *qtd,
+ struct isp1760_qh *qh)
{
- struct isp1760_qtd *tmp_qtd;
- int last_one;
+ struct urb *urb;
+ urb = qtd->urb;
do {
- tmp_qtd = qtd->hw_next;
- last_one = qtd->status & URB_COMPLETE_NOTIFY;
- list_del(&qtd->qtd_list);
- isp1760_qtd_free(qtd);
- qtd = tmp_qtd;
- } while (!last_one && qtd);
+ qtd = clean_this_qtd(qtd, qh);
+ } while (qtd && (qtd->urb == urb));
return qtd;
}
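/*
 * Example for clean_up_qtdlist(): with qh->qtd_list = [A1, A2, B1, B2],
 * calling it on A1 frees A1 and A2 (both belong to URB A) and returns
 * B1, the first qtd of the next URB, which the caller can then
 * enqueue; it returns NULL when the freed URB was the last one queued
 * on this qh.
 */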
-static void do_atl_int(struct usb_hcd *usb_hcd)
+static void do_atl_int(struct usb_hcd *hcd)
{
- struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 done_map, skip_map;
struct ptd ptd;
- struct urb *urb = NULL;
- u32 atl_regs_base;
- u32 atl_regs;
- u32 queue_entry;
- u32 payload;
+ struct urb *urb;
+ u32 slot;
u32 length;
u32 or_map;
u32 status = -EINVAL;
@@ -982,63 +1003,36 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
u32 rl;
u32 nakcount;
- done_map = isp1760_readl(usb_hcd->regs +
- HC_ATL_PTD_DONEMAP_REG);
- skip_map = isp1760_readl(usb_hcd->regs +
- HC_ATL_PTD_SKIPMAP_REG);
+ done_map = reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG);
+ skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
- or_map = isp1760_readl(usb_hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
+ or_map = reg_read32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG);
or_map &= ~done_map;
- isp1760_writel(or_map, usb_hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
+ reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, or_map);
- atl_regs_base = ATL_REGS_OFFSET;
while (done_map) {
- u32 dw1;
- u32 dw2;
- u32 dw3;
-
status = 0;
priv->atl_queued--;
- queue_entry = __ffs(done_map);
- done_map &= ~(1 << queue_entry);
- skip_map |= 1 << queue_entry;
+ slot = __ffs(done_map);
+ done_map &= ~(1 << slot);
+ skip_map |= (1 << slot);
- atl_regs = atl_regs_base + queue_entry * sizeof(struct ptd);
-
- urb = priv->atl_ints[queue_entry].urb;
- qtd = priv->atl_ints[queue_entry].qtd;
- qh = priv->atl_ints[queue_entry].qh;
- payload = priv->atl_ints[queue_entry].payload;
+ qtd = priv->atl_ints[slot].qtd;
+ qh = priv->atl_ints[slot].qh;
if (!qh) {
- printk(KERN_ERR "qh is 0\n");
+ dev_err(hcd->self.controller, "qh is 0\n");
continue;
}
- isp1760_writel(atl_regs + ISP_BANK(0), usb_hcd->regs +
- HC_MEMORY_REG);
- isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
- HC_MEMORY_REG);
- /*
- * write bank1 address twice to ensure the 90ns delay (time
- * between BANK0 write and the priv_read_copy() call is at
- * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns)
- */
- isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
- HC_MEMORY_REG);
-
- priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs +
- ISP_BANK(0), sizeof(ptd));
+ ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
- dw1 = le32_to_cpu(ptd.dw1);
- dw2 = le32_to_cpu(ptd.dw2);
- dw3 = le32_to_cpu(ptd.dw3);
- rl = (dw2 >> 25) & 0x0f;
- nakcount = (dw3 >> 19) & 0xf;
+ rl = (ptd.dw2 >> 25) & 0x0f;
+ nakcount = (ptd.dw3 >> 19) & 0xf;
/* Transfer Error, *but* active and no HALT -> reload */
- if ((dw3 & DW3_ERROR_BIT) && (dw3 & DW3_QTD_ACTIVE) &&
- !(dw3 & DW3_HALT_BIT)) {
+ if ((ptd.dw3 & DW3_ERROR_BIT) && (ptd.dw3 & DW3_QTD_ACTIVE) &&
+ !(ptd.dw3 & DW3_HALT_BIT)) {
/* according to ppriv code, we have to
 	 * reload this one if transferred bytes != requested bytes
@@ -1047,13 +1041,14 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
* triggered so far.
*/
- length = PTD_XFERRED_LENGTH(dw3);
- printk(KERN_ERR "Should reload now.... transfered %d "
+ length = PTD_XFERRED_LENGTH(ptd.dw3);
+ dev_err(hcd->self.controller,
+ "Should reload now... transferred %d "
"of %zu\n", length, qtd->length);
BUG();
}
- if (!nakcount && (dw3 & DW3_QTD_ACTIVE)) {
+ if (!nakcount && (ptd.dw3 & DW3_QTD_ACTIVE)) {
u32 buffstatus;
/*
@@ -1063,50 +1058,43 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
*/
/* RL counter = ERR counter */
- dw3 &= ~(0xf << 19);
- dw3 |= rl << 19;
- dw3 &= ~(3 << (55 - 32));
- dw3 |= ERR_COUNTER << (55 - 32);
+ ptd.dw3 &= ~(0xf << 19);
+ ptd.dw3 |= rl << 19;
+ ptd.dw3 &= ~(3 << (55 - 32));
+ ptd.dw3 |= ERR_COUNTER << (55 - 32);
/*
* It is not needed to write skip map back because it
* is unchanged. Just make sure that this entry is
* unskipped once it gets written to the HW.
*/
- skip_map &= ~(1 << queue_entry);
- or_map = isp1760_readl(usb_hcd->regs +
- HC_ATL_IRQ_MASK_OR_REG);
- or_map |= 1 << queue_entry;
- isp1760_writel(or_map, usb_hcd->regs +
- HC_ATL_IRQ_MASK_OR_REG);
-
- ptd.dw3 = cpu_to_le32(dw3);
- priv_write_copy(priv, (u32 *)&ptd, usb_hcd->regs +
- atl_regs, sizeof(ptd));
+ skip_map &= ~(1 << slot);
+ or_map = reg_read32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG);
+ or_map |= 1 << slot;
+ reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, or_map);
- ptd.dw0 |= cpu_to_le32(PTD_VALID);
- priv_write_copy(priv, (u32 *)&ptd, usb_hcd->regs +
- atl_regs, sizeof(ptd));
+ ptd.dw0 |= PTD_VALID;
+ ptd_write(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
priv->atl_queued++;
if (priv->atl_queued == 2)
- isp1760_writel(INTERRUPT_ENABLE_SOT_MASK,
- usb_hcd->regs + HC_INTERRUPT_ENABLE);
+ reg_write32(hcd->regs, HC_INTERRUPT_ENABLE,
+ INTERRUPT_ENABLE_SOT_MASK);
- buffstatus = isp1760_readl(usb_hcd->regs +
- HC_BUFFER_STATUS_REG);
+ buffstatus = reg_read32(hcd->regs,
+ HC_BUFFER_STATUS_REG);
buffstatus |= ATL_BUFFER;
- isp1760_writel(buffstatus, usb_hcd->regs +
- HC_BUFFER_STATUS_REG);
+ reg_write32(hcd->regs, HC_BUFFER_STATUS_REG,
+ buffstatus);
continue;
}
- error = check_error(&ptd);
+ error = check_error(hcd, &ptd);
if (error) {
status = error;
- priv->atl_ints[queue_entry].qh->toggle = 0;
- priv->atl_ints[queue_entry].qh->ping = 0;
- urb->status = -EPIPE;
+ priv->atl_ints[slot].qh->toggle = 0;
+ priv->atl_ints[slot].qh->ping = 0;
+ qtd->urb->status = -EPIPE;
#if 0
printk(KERN_ERR "Error in %s().\n", __func__);
@@ -1117,157 +1105,123 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
ptd.dw4, ptd.dw5, ptd.dw6, ptd.dw7);
#endif
} else {
- if (usb_pipetype(urb->pipe) == PIPE_BULK) {
- priv->atl_ints[queue_entry].qh->toggle = dw3 &
- (1 << 25);
- priv->atl_ints[queue_entry].qh->ping = dw3 &
- (1 << 26);
- }
+ priv->atl_ints[slot].qh->toggle = ptd.dw3 & (1 << 25);
+ priv->atl_ints[slot].qh->ping = ptd.dw3 & (1 << 26);
}
- length = PTD_XFERRED_LENGTH(dw3);
+ length = PTD_XFERRED_LENGTH(ptd.dw3);
if (length) {
- switch (DW1_GET_PID(dw1)) {
+ switch (DW1_GET_PID(ptd.dw1)) {
case IN_PID:
- priv_read_copy(priv,
- priv->atl_ints[queue_entry].data_buffer,
- usb_hcd->regs + payload + ISP_BANK(1),
- length);
+ mem_reads8(hcd->regs, qtd->payload_addr,
+ qtd->data_buffer, length);
case OUT_PID:
- urb->actual_length += length;
+ qtd->urb->actual_length += length;
case SETUP_PID:
break;
}
}
- priv->atl_ints[queue_entry].data_buffer = NULL;
- priv->atl_ints[queue_entry].urb = NULL;
- priv->atl_ints[queue_entry].qtd = NULL;
- priv->atl_ints[queue_entry].qh = NULL;
+ priv->atl_ints[slot].qtd = NULL;
+ priv->atl_ints[slot].qh = NULL;
- free_mem(priv, payload);
+ free_mem(hcd, qtd);
- isp1760_writel(skip_map, usb_hcd->regs +
- HC_ATL_PTD_SKIPMAP_REG);
+ reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
- if (urb->status == -EPIPE) {
+ if (qtd->urb->status == -EPIPE) {
/* HALT was received */
- qtd = clean_up_qtdlist(qtd);
- isp1760_urb_done(priv, urb, urb->status);
+ urb = qtd->urb;
+ qtd = clean_up_qtdlist(qtd, qh);
+ isp1760_urb_done(hcd, urb);
- } else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) {
+ } else if (usb_pipebulk(qtd->urb->pipe) &&
+ (length < qtd->length)) {
/* short BULK received */
- if (urb->transfer_flags & URB_SHORT_NOT_OK) {
- urb->status = -EREMOTEIO;
- isp1760_dbg(priv, "short bulk, %d instead %zu "
- "with URB_SHORT_NOT_OK flag.\n",
- length, qtd->length);
+ if (qtd->urb->transfer_flags & URB_SHORT_NOT_OK) {
+ qtd->urb->status = -EREMOTEIO;
+ dev_dbg(hcd->self.controller,
+					"short bulk, %d instead of %zu "
+ "with URB_SHORT_NOT_OK flag.\n",
+ length, qtd->length);
}
- if (urb->status == -EINPROGRESS)
- urb->status = 0;
+ if (qtd->urb->status == -EINPROGRESS)
+ qtd->urb->status = 0;
- qtd = clean_up_qtdlist(qtd);
-
- isp1760_urb_done(priv, urb, urb->status);
+ urb = qtd->urb;
+ qtd = clean_up_qtdlist(qtd, qh);
+ isp1760_urb_done(hcd, urb);
- } else if (qtd->status & URB_COMPLETE_NOTIFY) {
+ } else if (last_qtd_of_urb(qtd, qh)) {
/* that was the last qtd of that URB */
- if (urb->status == -EINPROGRESS)
- urb->status = 0;
+ if (qtd->urb->status == -EINPROGRESS)
+ qtd->urb->status = 0;
- qtd = clean_this_qtd(qtd);
- isp1760_urb_done(priv, urb, urb->status);
+ urb = qtd->urb;
+ qtd = clean_up_qtdlist(qtd, qh);
+ isp1760_urb_done(hcd, urb);
} else {
/* next QTD of this URB */
- qtd = clean_this_qtd(qtd);
+ qtd = clean_this_qtd(qtd, qh);
BUG_ON(!qtd);
}
if (qtd)
- enqueue_an_ATL_packet(usb_hcd, qh, qtd);
+ enqueue_an_ATL_packet(hcd, qh, qtd);
- skip_map = isp1760_readl(usb_hcd->regs +
- HC_ATL_PTD_SKIPMAP_REG);
+ skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
}
if (priv->atl_queued <= 1)
- isp1760_writel(INTERRUPT_ENABLE_MASK,
- usb_hcd->regs + HC_INTERRUPT_ENABLE);
+ reg_write32(hcd->regs, HC_INTERRUPT_ENABLE,
+ INTERRUPT_ENABLE_MASK);
}
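/*
 * Example of the done/skip bookkeeping in do_atl_int(): if the
 * controller reports done_map = 0x0a, the loop handles slot 1 first
 * (__ffs), clears that bit (done_map becomes 0x08) and marks slot 1
 * skipped, then does the same for slot 3.  A slot is only unskipped
 * again when a fresh PTD is written to it or when a NAKed transfer is
 * reloaded in place, as in the DW3_QTD_ACTIVE branch above.
 */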
-static void do_intl_int(struct usb_hcd *usb_hcd)
+static void do_intl_int(struct usb_hcd *hcd)
{
- struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 done_map, skip_map;
struct ptd ptd;
- struct urb *urb = NULL;
- u32 int_regs;
- u32 int_regs_base;
- u32 payload;
+ struct urb *urb;
u32 length;
u32 or_map;
int error;
- u32 queue_entry;
+ u32 slot;
struct isp1760_qtd *qtd;
struct isp1760_qh *qh;
- done_map = isp1760_readl(usb_hcd->regs +
- HC_INT_PTD_DONEMAP_REG);
- skip_map = isp1760_readl(usb_hcd->regs +
- HC_INT_PTD_SKIPMAP_REG);
+ done_map = reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG);
+ skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
- or_map = isp1760_readl(usb_hcd->regs + HC_INT_IRQ_MASK_OR_REG);
+ or_map = reg_read32(hcd->regs, HC_INT_IRQ_MASK_OR_REG);
or_map &= ~done_map;
- isp1760_writel(or_map, usb_hcd->regs + HC_INT_IRQ_MASK_OR_REG);
-
- int_regs_base = INT_REGS_OFFSET;
+ reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, or_map);
while (done_map) {
- u32 dw1;
- u32 dw3;
-
- queue_entry = __ffs(done_map);
- done_map &= ~(1 << queue_entry);
- skip_map |= 1 << queue_entry;
+ slot = __ffs(done_map);
+ done_map &= ~(1 << slot);
+ skip_map |= (1 << slot);
- int_regs = int_regs_base + queue_entry * sizeof(struct ptd);
- urb = priv->int_ints[queue_entry].urb;
- qtd = priv->int_ints[queue_entry].qtd;
- qh = priv->int_ints[queue_entry].qh;
- payload = priv->int_ints[queue_entry].payload;
+ qtd = priv->int_ints[slot].qtd;
+ qh = priv->int_ints[slot].qh;
if (!qh) {
- printk(KERN_ERR "(INT) qh is 0\n");
+ dev_err(hcd->self.controller, "(INT) qh is 0\n");
continue;
}
- isp1760_writel(int_regs + ISP_BANK(0), usb_hcd->regs +
- HC_MEMORY_REG);
- isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
- HC_MEMORY_REG);
- /*
- * write bank1 address twice to ensure the 90ns delay (time
- * between BANK0 write and the priv_read_copy() call is at
- * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 92ns)
- */
- isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
- HC_MEMORY_REG);
-
- priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs +
- ISP_BANK(0), sizeof(ptd));
- dw1 = le32_to_cpu(ptd.dw1);
- dw3 = le32_to_cpu(ptd.dw3);
- check_int_err_status(le32_to_cpu(ptd.dw4));
+ ptd_read(hcd->regs, INT_PTD_OFFSET, slot, &ptd);
+ check_int_err_status(hcd, ptd.dw4);
- error = check_error(&ptd);
+ error = check_error(hcd, &ptd);
if (error) {
#if 0
printk(KERN_ERR "Error in %s().\n", __func__);
@@ -1277,83 +1231,77 @@ static void do_intl_int(struct usb_hcd *usb_hcd)
ptd.dw0, ptd.dw1, ptd.dw2, ptd.dw3,
ptd.dw4, ptd.dw5, ptd.dw6, ptd.dw7);
#endif
- urb->status = -EPIPE;
- priv->int_ints[queue_entry].qh->toggle = 0;
- priv->int_ints[queue_entry].qh->ping = 0;
+ qtd->urb->status = -EPIPE;
+ priv->int_ints[slot].qh->toggle = 0;
+ priv->int_ints[slot].qh->ping = 0;
} else {
- priv->int_ints[queue_entry].qh->toggle =
- dw3 & (1 << 25);
- priv->int_ints[queue_entry].qh->ping = dw3 & (1 << 26);
+ priv->int_ints[slot].qh->toggle = ptd.dw3 & (1 << 25);
+ priv->int_ints[slot].qh->ping = ptd.dw3 & (1 << 26);
}
- if (urb->dev->speed != USB_SPEED_HIGH)
- length = PTD_XFERRED_LENGTH_LO(dw3);
+ if (qtd->urb->dev->speed != USB_SPEED_HIGH)
+ length = PTD_XFERRED_LENGTH_LO(ptd.dw3);
else
- length = PTD_XFERRED_LENGTH(dw3);
+ length = PTD_XFERRED_LENGTH(ptd.dw3);
if (length) {
- switch (DW1_GET_PID(dw1)) {
+ switch (DW1_GET_PID(ptd.dw1)) {
case IN_PID:
- priv_read_copy(priv,
- priv->int_ints[queue_entry].data_buffer,
- usb_hcd->regs + payload + ISP_BANK(1),
- length);
+ mem_reads8(hcd->regs, qtd->payload_addr,
+ qtd->data_buffer, length);
case OUT_PID:
- urb->actual_length += length;
+ qtd->urb->actual_length += length;
case SETUP_PID:
break;
}
}
- priv->int_ints[queue_entry].data_buffer = NULL;
- priv->int_ints[queue_entry].urb = NULL;
- priv->int_ints[queue_entry].qtd = NULL;
- priv->int_ints[queue_entry].qh = NULL;
+ priv->int_ints[slot].qtd = NULL;
+ priv->int_ints[slot].qh = NULL;
- isp1760_writel(skip_map, usb_hcd->regs +
- HC_INT_PTD_SKIPMAP_REG);
- free_mem(priv, payload);
+ reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
+ free_mem(hcd, qtd);
- if (urb->status == -EPIPE) {
+ if (qtd->urb->status == -EPIPE) {
/* HALT received */
- qtd = clean_up_qtdlist(qtd);
- isp1760_urb_done(priv, urb, urb->status);
+ urb = qtd->urb;
+ qtd = clean_up_qtdlist(qtd, qh);
+ isp1760_urb_done(hcd, urb);
- } else if (qtd->status & URB_COMPLETE_NOTIFY) {
+ } else if (last_qtd_of_urb(qtd, qh)) {
- if (urb->status == -EINPROGRESS)
- urb->status = 0;
+ if (qtd->urb->status == -EINPROGRESS)
+ qtd->urb->status = 0;
- qtd = clean_this_qtd(qtd);
- isp1760_urb_done(priv, urb, urb->status);
+ urb = qtd->urb;
+ qtd = clean_up_qtdlist(qtd, qh);
+ isp1760_urb_done(hcd, urb);
} else {
/* next QTD of this URB */
- qtd = clean_this_qtd(qtd);
+ qtd = clean_this_qtd(qtd, qh);
BUG_ON(!qtd);
}
if (qtd)
- enqueue_an_INT_packet(usb_hcd, qh, qtd);
+ enqueue_an_INT_packet(hcd, qh, qtd);
- skip_map = isp1760_readl(usb_hcd->regs +
- HC_INT_PTD_SKIPMAP_REG);
+ skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
}
}
-#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
-static struct isp1760_qh *qh_make(struct isp1760_hcd *priv, struct urb *urb,
+static struct isp1760_qh *qh_make(struct usb_hcd *hcd, struct urb *urb,
gfp_t flags)
{
struct isp1760_qh *qh;
int is_input, type;
- qh = isp1760_qh_alloc(priv, flags);
+ qh = isp1760_qh_alloc(flags);
if (!qh)
return qh;
@@ -1363,29 +1311,6 @@ static struct isp1760_qh *qh_make(struct isp1760_hcd *priv, struct urb *urb,
is_input = usb_pipein(urb->pipe);
type = usb_pipetype(urb->pipe);
- if (type == PIPE_INTERRUPT) {
-
- if (urb->dev->speed == USB_SPEED_HIGH) {
-
- qh->period = urb->interval >> 3;
- if (qh->period == 0 && urb->interval != 1) {
- /* NOTE interval 2 or 4 uframes could work.
- * But interval 1 scheduling is simpler, and
- * includes high bandwidth.
- */
- printk(KERN_ERR "intr period %d uframes, NYET!",
- urb->interval);
- qh_destroy(qh);
- return NULL;
- }
- } else {
- qh->period = urb->interval;
- }
- }
-
- /* support for tt scheduling, and access to toggles */
- qh->dev = urb->dev;
-
if (!usb_pipecontrol(urb->pipe))
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input,
1);
@@ -1398,43 +1323,27 @@ static struct isp1760_qh *qh_make(struct isp1760_hcd *priv, struct urb *urb,
* Returns null if it can't allocate a QH it needs to.
* If the QH has TDs (urbs) already, that's great.
*/
-static struct isp1760_qh *qh_append_tds(struct isp1760_hcd *priv,
+static struct isp1760_qh *qh_append_tds(struct usb_hcd *hcd,
struct urb *urb, struct list_head *qtd_list, int epnum,
void **ptr)
{
struct isp1760_qh *qh;
- struct isp1760_qtd *qtd;
- struct isp1760_qtd *prev_qtd;
qh = (struct isp1760_qh *)*ptr;
if (!qh) {
/* can't sleep here, we have priv->lock... */
- qh = qh_make(priv, urb, GFP_ATOMIC);
+ qh = qh_make(hcd, urb, GFP_ATOMIC);
if (!qh)
return qh;
*ptr = qh;
}
- qtd = list_entry(qtd_list->next, struct isp1760_qtd,
- qtd_list);
- if (!list_empty(&qh->qtd_list))
- prev_qtd = list_entry(qh->qtd_list.prev,
- struct isp1760_qtd, qtd_list);
- else
- prev_qtd = NULL;
-
list_splice(qtd_list, qh->qtd_list.prev);
- if (prev_qtd) {
- BUG_ON(prev_qtd->hw_next);
- prev_qtd->hw_next = qtd;
- }
- urb->hcpriv = qh;
return qh;
}
-static void qtd_list_free(struct isp1760_hcd *priv, struct urb *urb,
- struct list_head *qtd_list)
+static void qtd_list_free(struct urb *urb, struct list_head *qtd_list)
{
struct list_head *entry, *temp;
@@ -1447,9 +1356,10 @@ static void qtd_list_free(struct isp1760_hcd *priv, struct urb *urb,
}
}
-static int isp1760_prepare_enqueue(struct isp1760_hcd *priv, struct urb *urb,
+static int isp1760_prepare_enqueue(struct usb_hcd *hcd, struct urb *urb,
struct list_head *qtd_list, gfp_t mem_flags, packet_enqueue *p)
{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
struct isp1760_qtd *qtd;
int epnum;
unsigned long flags;
@@ -1461,11 +1371,11 @@ static int isp1760_prepare_enqueue(struct isp1760_hcd *priv, struct urb *urb,
epnum = urb->ep->desc.bEndpointAddress;
spin_lock_irqsave(&priv->lock, flags);
- if (!HCD_HW_ACCESSIBLE(priv_to_hcd(priv))) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
rc = -ESHUTDOWN;
goto done;
}
- rc = usb_hcd_link_urb_to_ep(priv_to_hcd(priv), urb);
+ rc = usb_hcd_link_urb_to_ep(hcd, urb);
if (rc)
goto done;
@@ -1475,25 +1385,24 @@ static int isp1760_prepare_enqueue(struct isp1760_hcd *priv, struct urb *urb,
else
qh_busy = 0;
- qh = qh_append_tds(priv, urb, qtd_list, epnum, &urb->ep->hcpriv);
+ qh = qh_append_tds(hcd, urb, qtd_list, epnum, &urb->ep->hcpriv);
if (!qh) {
- usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
rc = -ENOMEM;
goto done;
}
if (!qh_busy)
- p(priv_to_hcd(priv), qh, qtd);
+ p(hcd, qh, qtd);
done:
spin_unlock_irqrestore(&priv->lock, flags);
if (!qh)
- qtd_list_free(priv, urb, qtd_list);
+ qtd_list_free(urb, qtd_list);
return rc;
}
-static struct isp1760_qtd *isp1760_qtd_alloc(struct isp1760_hcd *priv,
- gfp_t flags)
+static struct isp1760_qtd *isp1760_qtd_alloc(gfp_t flags)
{
struct isp1760_qtd *qtd;
@@ -1507,10 +1416,11 @@ static struct isp1760_qtd *isp1760_qtd_alloc(struct isp1760_hcd *priv,
/*
* create a list of filled qtds for this URB; won't link into qh.
*/
-static struct list_head *qh_urb_transaction(struct isp1760_hcd *priv,
+#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
+static struct list_head *qh_urb_transaction(struct usb_hcd *hcd,
struct urb *urb, struct list_head *head, gfp_t flags)
{
- struct isp1760_qtd *qtd, *qtd_prev;
+ struct isp1760_qtd *qtd;
void *buf;
int len, maxpacket;
int is_input;
@@ -1519,7 +1429,7 @@ static struct list_head *qh_urb_transaction(struct isp1760_hcd *priv,
/*
* URBs map to sequences of QTDs: one logical transaction
*/
- qtd = isp1760_qtd_alloc(priv, flags);
+ qtd = isp1760_qtd_alloc(flags);
if (!qtd)
return NULL;
@@ -1539,13 +1449,10 @@ static struct list_head *qh_urb_transaction(struct isp1760_hcd *priv,
token | SETUP_PID);
/* ... and always at least one more pid */
- token ^= DATA_TOGGLE;
- qtd_prev = qtd;
- qtd = isp1760_qtd_alloc(priv, flags);
+ qtd = isp1760_qtd_alloc(flags);
if (!qtd)
goto cleanup;
qtd->urb = urb;
- qtd_prev->hw_next = qtd;
list_add_tail(&qtd->qtd_list, head);
/* for zero length DATA stages, STATUS is always IN */
@@ -1575,7 +1482,7 @@ static struct list_head *qh_urb_transaction(struct isp1760_hcd *priv,
if (!buf && len) {
/* XXX This looks like usb storage / SCSI bug */
- printk(KERN_ERR "buf is null, dma is %08lx len is %d\n",
+ dev_err(hcd->self.controller, "buf is null, dma is %08lx len is %d\n",
(long unsigned)urb->transfer_dma, len);
WARN_ON(1);
}
@@ -1584,19 +1491,13 @@ static struct list_head *qh_urb_transaction(struct isp1760_hcd *priv,
len -= this_qtd_len;
buf += this_qtd_len;
- /* qh makes control packets use qtd toggle; maybe switch it */
- if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
- token ^= DATA_TOGGLE;
-
if (len <= 0)
break;
- qtd_prev = qtd;
- qtd = isp1760_qtd_alloc(priv, flags);
+ qtd = isp1760_qtd_alloc(flags);
if (!qtd)
goto cleanup;
qtd->urb = urb;
- qtd_prev->hw_next = qtd;
list_add_tail(&qtd->qtd_list, head);
}
@@ -1611,20 +1512,16 @@ static struct list_head *qh_urb_transaction(struct isp1760_hcd *priv,
one_more = 1;
/* "in" <--> "out" */
token ^= IN_PID;
- /* force DATA1 */
- token |= DATA_TOGGLE;
} else if (usb_pipebulk(urb->pipe)
&& (urb->transfer_flags & URB_ZERO_PACKET)
&& !(urb->transfer_buffer_length % maxpacket)) {
one_more = 1;
}
if (one_more) {
- qtd_prev = qtd;
- qtd = isp1760_qtd_alloc(priv, flags);
+ qtd = isp1760_qtd_alloc(flags);
if (!qtd)
goto cleanup;
qtd->urb = urb;
- qtd_prev->hw_next = qtd;
list_add_tail(&qtd->qtd_list, head);
/* never any data in such packets */
@@ -1632,18 +1529,17 @@ static struct list_head *qh_urb_transaction(struct isp1760_hcd *priv,
}
}
- qtd->status = URB_COMPLETE_NOTIFY;
+ qtd->status = 0;
return head;
cleanup:
- qtd_list_free(priv, urb, head);
+ qtd_list_free(urb, head);
return NULL;
}
static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
- struct isp1760_hcd *priv = hcd_to_priv(hcd);
struct list_head qtd_list;
packet_enqueue *pe;
@@ -1652,29 +1548,27 @@ static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
case PIPE_BULK:
-
- if (!qh_urb_transaction(priv, urb, &qtd_list, mem_flags))
+ if (!qh_urb_transaction(hcd, urb, &qtd_list, mem_flags))
return -ENOMEM;
pe = enqueue_an_ATL_packet;
break;
case PIPE_INTERRUPT:
- if (!qh_urb_transaction(priv, urb, &qtd_list, mem_flags))
+ if (!qh_urb_transaction(hcd, urb, &qtd_list, mem_flags))
return -ENOMEM;
pe = enqueue_an_INT_packet;
break;
case PIPE_ISOCHRONOUS:
- printk(KERN_ERR "PIPE_ISOCHRONOUS ain't supported\n");
+ dev_err(hcd->self.controller, "PIPE_ISOCHRONOUS ain't supported\n");
default:
return -EPIPE;
}
- return isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe);
+ return isp1760_prepare_enqueue(hcd, urb, &qtd_list, mem_flags, pe);
}
-static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
- int status)
+static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
struct inter_packet_info *ints;
@@ -1691,7 +1585,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
case PIPE_INTERRUPT:
ints = priv->int_ints;
- reg_base = INT_REGS_OFFSET;
+ reg_base = INT_PTD_OFFSET;
or_reg = HC_INT_IRQ_MASK_OR_REG;
skip_reg = HC_INT_PTD_SKIPMAP_REG;
pe = enqueue_an_INT_packet;
@@ -1699,7 +1593,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
default:
ints = priv->atl_ints;
- reg_base = ATL_REGS_OFFSET;
+ reg_base = ATL_PTD_OFFSET;
or_reg = HC_ATL_IRQ_MASK_OR_REG;
skip_reg = HC_ATL_PTD_SKIPMAP_REG;
pe = enqueue_an_ATL_packet;
@@ -1710,81 +1604,84 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
spin_lock_irqsave(&priv->lock, flags);
for (i = 0; i < 32; i++) {
- if (ints->urb == urb) {
+ if (!ints[i].qh)
+ continue;
+ BUG_ON(!ints[i].qtd);
+
+ if (ints[i].qtd->urb == urb) {
u32 skip_map;
u32 or_map;
struct isp1760_qtd *qtd;
- struct isp1760_qh *qh = ints->qh;
+ struct isp1760_qh *qh;
- skip_map = isp1760_readl(hcd->regs + skip_reg);
+ skip_map = reg_read32(hcd->regs, skip_reg);
skip_map |= 1 << i;
- isp1760_writel(skip_map, hcd->regs + skip_reg);
+ reg_write32(hcd->regs, skip_reg, skip_map);
- or_map = isp1760_readl(hcd->regs + or_reg);
+ or_map = reg_read32(hcd->regs, or_reg);
or_map &= ~(1 << i);
- isp1760_writel(or_map, hcd->regs + or_reg);
+ reg_write32(hcd->regs, or_reg, or_map);
+
+ ptd_write(hcd->regs, reg_base, i, &ptd);
- priv_write_copy(priv, (u32 *)&ptd, hcd->regs + reg_base
- + i * sizeof(ptd), sizeof(ptd));
- qtd = ints->qtd;
- qtd = clean_up_qtdlist(qtd);
+ qtd = ints[i].qtd;
+ qh = ints[i].qh;
- free_mem(priv, ints->payload);
+ free_mem(hcd, qtd);
+ qtd = clean_up_qtdlist(qtd, qh);
- ints->urb = NULL;
- ints->qh = NULL;
- ints->qtd = NULL;
- ints->data_buffer = NULL;
- ints->payload = 0;
+ ints[i].qh = NULL;
+ ints[i].qtd = NULL;
- isp1760_urb_done(priv, urb, status);
+ isp1760_urb_done(hcd, urb);
if (qtd)
pe(hcd, qh, qtd);
break;
- } else if (ints->qtd) {
- struct isp1760_qtd *qtd, *prev_qtd = ints->qtd;
+ } else {
+ struct isp1760_qtd *qtd;
- for (qtd = ints->qtd->hw_next; qtd; qtd = qtd->hw_next) {
+ list_for_each_entry(qtd, &ints[i].qtd->qtd_list,
+ qtd_list) {
if (qtd->urb == urb) {
- prev_qtd->hw_next = clean_up_qtdlist(qtd);
- isp1760_urb_done(priv, urb, status);
+ clean_up_qtdlist(qtd, ints[i].qh);
+ isp1760_urb_done(hcd, urb);
+ qtd = NULL;
break;
}
- prev_qtd = qtd;
}
- /* we found the urb before the end of the list */
- if (qtd)
+
+ /* We found the urb before the last slot */
+ if (!qtd)
break;
}
- ints++;
}
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
-static irqreturn_t isp1760_irq(struct usb_hcd *usb_hcd)
+static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
{
- struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 imask;
irqreturn_t irqret = IRQ_NONE;
spin_lock(&priv->lock);
- if (!(usb_hcd->state & HC_STATE_RUNNING))
+ if (!(hcd->state & HC_STATE_RUNNING))
goto leave;
- imask = isp1760_readl(usb_hcd->regs + HC_INTERRUPT_REG);
+ imask = reg_read32(hcd->regs, HC_INTERRUPT_REG);
if (unlikely(!imask))
goto leave;
- isp1760_writel(imask, usb_hcd->regs + HC_INTERRUPT_REG);
+ reg_write32(hcd->regs, HC_INTERRUPT_REG, imask);
if (imask & (HC_ATL_INT | HC_SOT_INT))
- do_atl_int(usb_hcd);
+ do_atl_int(hcd);
if (imask & HC_INTL_INT)
- do_intl_int(usb_hcd);
+ do_intl_int(hcd);
irqret = IRQ_HANDLED;
leave:
@@ -1809,12 +1706,12 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
mask = PORT_CSC;
spin_lock_irqsave(&priv->lock, flags);
- temp = isp1760_readl(hcd->regs + HC_PORTSC1);
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
if (temp & PORT_OWNER) {
if (temp & PORT_CSC) {
temp &= ~PORT_CSC;
- isp1760_writel(temp, hcd->regs + HC_PORTSC1);
+ reg_write32(hcd->regs, HC_PORTSC1, temp);
goto done;
}
}
@@ -1854,9 +1751,9 @@ static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
temp = 1 + (ports / 8);
desc->bDescLength = 7 + 2 * temp;
- /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
- memset(&desc->bitmap[0], 0, temp);
- memset(&desc->bitmap[temp], 0xff, temp);
+ /* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
/* per-port overcurrent reporting */
temp = 0x0008;
@@ -1871,8 +1768,8 @@ static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
-static int check_reset_complete(struct isp1760_hcd *priv, int index,
- u32 __iomem *status_reg, int port_status)
+static int check_reset_complete(struct usb_hcd *hcd, int index,
+ int port_status)
{
if (!(port_status & PORT_CONNECT))
return port_status;
@@ -1880,15 +1777,17 @@ static int check_reset_complete(struct isp1760_hcd *priv, int index,
/* if reset finished and it's still not enabled -- handoff */
if (!(port_status & PORT_PE)) {
- printk(KERN_ERR "port %d full speed --> companion\n",
- index + 1);
+ dev_err(hcd->self.controller,
+ "port %d full speed --> companion\n",
+ index + 1);
port_status |= PORT_OWNER;
port_status &= ~PORT_RWC_BITS;
- isp1760_writel(port_status, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, port_status);
} else
- printk(KERN_ERR "port %d high speed\n", index + 1);
+ dev_err(hcd->self.controller, "port %d high speed\n",
+ index + 1);
return port_status;
}
@@ -1898,7 +1797,6 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
int ports = HCS_N_PORTS(priv->hcs_params);
- u32 __iomem *status_reg = hcd->regs + HC_PORTSC1;
u32 temp, status;
unsigned long flags;
int retval = 0;
@@ -1927,7 +1825,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
- temp = isp1760_readl(status_reg);
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
/*
* Even if OWNER is set, so the port is owned by the
@@ -1938,7 +1836,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
- isp1760_writel(temp & ~PORT_PE, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_PE);
break;
case USB_PORT_FEAT_C_ENABLE:
/* XXX error? */
@@ -1952,8 +1850,8 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
goto error;
/* resume signaling for 20 msec */
temp &= ~(PORT_RWC_BITS);
- isp1760_writel(temp | PORT_RESUME,
- status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1,
+ temp | PORT_RESUME);
priv->reset_done = jiffies +
msecs_to_jiffies(20);
}
@@ -1963,11 +1861,11 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC(priv->hcs_params))
- isp1760_writel(temp & ~PORT_POWER, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1,
+ temp & ~PORT_POWER);
break;
case USB_PORT_FEAT_C_CONNECTION:
- isp1760_writel(temp | PORT_CSC,
- status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_CSC);
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
/* XXX error ?*/
@@ -1978,7 +1876,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
default:
goto error;
}
- isp1760_readl(hcd->regs + HC_USBCMD);
+ reg_read32(hcd->regs, HC_USBCMD);
break;
case GetHubDescriptor:
isp1760_hub_descriptor(priv, (struct usb_hub_descriptor *)
@@ -1993,7 +1891,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
goto error;
wIndex--;
status = 0;
- temp = isp1760_readl(status_reg);
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
/* wPortChange bits */
if (temp & PORT_CSC)
@@ -2002,7 +1900,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
/* whoever resumes must GetPortStatus to complete it!! */
if (temp & PORT_RESUME) {
- printk(KERN_ERR "Port resume should be skipped.\n");
+ dev_err(hcd->self.controller, "Port resume should be skipped.\n");
/* Remote Wakeup received? */
if (!priv->reset_done) {
@@ -2010,8 +1908,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
priv->reset_done = jiffies
+ msecs_to_jiffies(20);
/* check the port again */
- mod_timer(&priv_to_hcd(priv)->rh_timer,
- priv->reset_done);
+ mod_timer(&hcd->rh_timer, priv->reset_done);
}
/* resume completed? */
@@ -2021,14 +1918,13 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
priv->reset_done = 0;
/* stop resume signaling */
- temp = isp1760_readl(status_reg);
- isp1760_writel(
- temp & ~(PORT_RWC_BITS | PORT_RESUME),
- status_reg);
- retval = handshake(priv, status_reg,
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
+ reg_write32(hcd->regs, HC_PORTSC1,
+ temp & ~(PORT_RWC_BITS | PORT_RESUME));
+ retval = handshake(hcd, HC_PORTSC1,
PORT_RESUME, 0, 2000 /* 2msec */);
if (retval != 0) {
- isp1760_err(priv,
+ dev_err(hcd->self.controller,
"port %d resume error %d\n",
wIndex + 1, retval);
goto error;
@@ -2045,22 +1941,21 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
priv->reset_done = 0;
/* force reset to complete */
- isp1760_writel(temp & ~PORT_RESET,
- status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_RESET);
/* REVISIT: some hardware needs 550+ usec to clear
* this bit; seems too long to spin routinely...
*/
- retval = handshake(priv, status_reg,
+ retval = handshake(hcd, HC_PORTSC1,
PORT_RESET, 0, 750);
if (retval != 0) {
- isp1760_err(priv, "port %d reset error %d\n",
+ dev_err(hcd->self.controller, "port %d reset error %d\n",
wIndex + 1, retval);
goto error;
}
/* see what we found out */
- temp = check_reset_complete(priv, wIndex, status_reg,
- isp1760_readl(status_reg));
+ temp = check_reset_complete(hcd, wIndex,
+ reg_read32(hcd->regs, HC_PORTSC1));
}
/*
* Even if OWNER is set, there's no harm letting khubd
@@ -2069,12 +1964,12 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
*/
if (temp & PORT_OWNER)
- printk(KERN_ERR "Warning: PORT_OWNER is set\n");
+ dev_err(hcd->self.controller, "PORT_OWNER is set\n");
if (temp & PORT_CONNECT) {
status |= USB_PORT_STAT_CONNECTION;
/* status may be from integrated TT */
- status |= ehci_port_speed(priv, temp);
+ status |= USB_PORT_STAT_HIGH_SPEED;
}
if (temp & PORT_PE)
status |= USB_PORT_STAT_ENABLE;
@@ -2103,14 +1998,14 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
- temp = isp1760_readl(status_reg);
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
if (temp & PORT_OWNER)
break;
/* temp &= ~PORT_RWC_BITS; */
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
- isp1760_writel(temp | PORT_PE, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_PE);
break;
case USB_PORT_FEAT_SUSPEND:
@@ -2118,12 +2013,12 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
|| (temp & PORT_RESET) != 0)
goto error;
- isp1760_writel(temp | PORT_SUSPEND, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_SUSPEND);
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC(priv->hcs_params))
- isp1760_writel(temp | PORT_POWER,
- status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1,
+ temp | PORT_POWER);
break;
case USB_PORT_FEAT_RESET:
if (temp & PORT_RESUME)
@@ -2146,12 +2041,12 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
priv->reset_done = jiffies +
msecs_to_jiffies(50);
}
- isp1760_writel(temp, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp);
break;
default:
goto error;
}
- isp1760_readl(hcd->regs + HC_USBCMD);
+ reg_read32(hcd->regs, HC_USBCMD);
break;
default:
@@ -2163,10 +2058,10 @@ error:
return retval;
}
-static void isp1760_endpoint_disable(struct usb_hcd *usb_hcd,
+static void isp1760_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
- struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
struct isp1760_qh *qh;
struct isp1760_qtd *qtd;
unsigned long flags;
@@ -2186,16 +2081,16 @@ static void isp1760_endpoint_disable(struct usb_hcd *usb_hcd,
qtd_list);
if (qtd->status & URB_ENQUEUED) {
-
spin_unlock_irqrestore(&priv->lock, flags);
- isp1760_urb_dequeue(usb_hcd, qtd->urb, -ECONNRESET);
+ isp1760_urb_dequeue(hcd, qtd->urb, -ECONNRESET);
spin_lock_irqsave(&priv->lock, flags);
} else {
struct urb *urb;
urb = qtd->urb;
- clean_up_qtdlist(qtd);
- isp1760_urb_done(priv, urb, -ECONNRESET);
+ clean_up_qtdlist(qtd, qh);
+ urb->status = -ECONNRESET;
+ isp1760_urb_done(hcd, urb);
}
} while (1);
@@ -2213,7 +2108,7 @@ static int isp1760_get_frame(struct usb_hcd *hcd)
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 fr;
- fr = isp1760_readl(hcd->regs + HC_FRINDEX);
+ fr = reg_read32(hcd->regs, HC_FRINDEX);
return (fr >> 3) % priv->periodic_size;
}
@@ -2227,13 +2122,13 @@ static void isp1760_stop(struct usb_hcd *hcd)
mdelay(20);
spin_lock_irq(&priv->lock);
- ehci_reset(priv);
+ ehci_reset(hcd);
/* Disable IRQ */
- temp = isp1760_readl(hcd->regs + HC_HW_MODE_CTRL);
- isp1760_writel(temp &= ~HW_GLOBAL_INTR_EN, hcd->regs + HC_HW_MODE_CTRL);
+ temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp &= ~HW_GLOBAL_INTR_EN);
spin_unlock_irq(&priv->lock);
- isp1760_writel(0, hcd->regs + HC_CONFIGFLAG);
+ reg_write32(hcd->regs, HC_CONFIGFLAG, 0);
}
static void isp1760_shutdown(struct usb_hcd *hcd)
@@ -2241,12 +2136,12 @@ static void isp1760_shutdown(struct usb_hcd *hcd)
u32 command, temp;
isp1760_stop(hcd);
- temp = isp1760_readl(hcd->regs + HC_HW_MODE_CTRL);
- isp1760_writel(temp &= ~HW_GLOBAL_INTR_EN, hcd->regs + HC_HW_MODE_CTRL);
+ temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp &= ~HW_GLOBAL_INTR_EN);
- command = isp1760_readl(hcd->regs + HC_USBCMD);
+ command = reg_read32(hcd->regs, HC_USBCMD);
command &= ~CMD_RUN;
- isp1760_writel(command, hcd->regs + HC_USBCMD);
+ reg_write32(hcd->regs, HC_USBCMD, command);
}
static const struct hc_driver isp1760_hc_driver = {
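
The hunk above converts every MMIO access from the old pointer-arithmetic isp1760_readl()/isp1760_writel() helpers to base-plus-offset accessors, but the accessor bodies fall outside the quoted context. A minimal sketch of what the conversion assumes (plain readl()/writel() wrappers; treat the bodies as an assumption rather than the driver's verbatim code):

	/* Assumed shape of the accessors used throughout the hunk above. */
	static u32 reg_read32(void __iomem *base, u32 reg)
	{
		return readl(base + reg);
	}

	static void reg_write32(void __iomem *base, u32 reg, u32 val)
	{
		writel(val, base + reg);
	}
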
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
index 612bce5dce0..87050769060 100644
--- a/drivers/usb/host/isp1760-hcd.h
+++ b/drivers/usb/host/isp1760-hcd.h
@@ -84,37 +84,29 @@ void deinit_kmem_cache(void);
#define HC_INT_IRQ_MASK_AND_REG 0x328
#define HC_ATL_IRQ_MASK_AND_REG 0x32C
-/* Register sets */
-#define HC_BEGIN_OF_ATL 0x0c00
-#define HC_BEGIN_OF_INT 0x0800
-#define HC_BEGIN_OF_ISO 0x0400
-#define HC_BEGIN_OF_PAYLOAD 0x1000
-
/* urb state*/
#define DELETE_URB (0x0008)
#define NO_TRANSFER_ACTIVE (0xffffffff)
-#define ATL_REGS_OFFSET (0xc00)
-#define INT_REGS_OFFSET (0x800)
-
-/* Philips Transfer Descriptor (PTD) */
+/* Philips Proprietary Transfer Descriptor (PTD) */
+typedef __u32 __bitwise __dw;
struct ptd {
- __le32 dw0;
- __le32 dw1;
- __le32 dw2;
- __le32 dw3;
- __le32 dw4;
- __le32 dw5;
- __le32 dw6;
- __le32 dw7;
+ __dw dw0;
+ __dw dw1;
+ __dw dw2;
+ __dw dw3;
+ __dw dw4;
+ __dw dw5;
+ __dw dw6;
+ __dw dw7;
};
+#define PTD_OFFSET 0x0400
+#define ISO_PTD_OFFSET 0x0400
+#define INT_PTD_OFFSET 0x0800
+#define ATL_PTD_OFFSET 0x0c00
+#define PAYLOAD_OFFSET 0x1000
struct inter_packet_info {
- void *data_buffer;
- u32 payload;
-#define PTD_FIRE_NEXT (1 << 0)
-#define PTD_URB_FINISHED (1 << 1)
- struct urb *urb;
struct isp1760_qh *qh;
struct isp1760_qtd *qtd;
};
@@ -123,15 +115,6 @@ struct inter_packet_info {
typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
struct isp1760_qtd *qtd);
-#define isp1760_dbg(priv, fmt, args...) \
- dev_dbg(priv_to_hcd(priv)->self.controller, fmt, ##args)
-
-#define isp1760_info(priv, fmt, args...) \
- dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args)
-
-#define isp1760_err(priv, fmt, args...) \
- dev_err(priv_to_hcd(priv)->self.controller, fmt, ##args)
-
/*
* Device flags that can vary from board to board. All of these
* indicate the most "atypical" case, so that a devflags of 0 is
@@ -168,10 +151,8 @@ struct memory_chunk {
#define BLOCK_2_SIZE 1024
#define BLOCK_3_SIZE 8192
#define BLOCKS (BLOCK_1_NUM + BLOCK_2_NUM + BLOCK_3_NUM)
-#define PAYLOAD_SIZE 0xf000
-
-/* I saw if some reloads if the pointer was negative */
-#define ISP1760_NULL_POINTER (0x400)
+#define MAX_PAYLOAD_SIZE BLOCK_3_SIZE
+#define PAYLOAD_AREA_SIZE 0xf000
/* ATL */
/* DW0 */
@@ -225,6 +206,4 @@ struct memory_chunk {
#define NAK_COUNTER (0)
#define ERR_COUNTER (2)
-#define HC_ATL_PL_SIZE (8192)
-
#endif
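
The new ISO/INT/ATL_PTD_OFFSET and PAYLOAD_OFFSET constants describe the ISP1760's banked PTD memory directly in the header, replacing the old HC_BEGIN_OF_* names. A hypothetical helper (name and shape are illustrative only, not taken from the driver) shows how a slot index would be turned into a chip-memory offset:

	/* Illustrative only: offset of PTD slot 'slot' within one bank. */
	static u32 ptd_slot_offset(u32 bank_offset, u32 slot)
	{
		/* bank_offset is e.g. ATL_PTD_OFFSET; each PTD is eight 32-bit words */
		return bank_offset + slot * sizeof(struct ptd);
	}
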
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 759a12ff804..fb035751e4b 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -75,6 +75,7 @@ static const char hcd_name [] = "ohci_hcd";
#define STATECHANGE_DELAY msecs_to_jiffies(300)
#include "ohci.h"
+#include "pci-quirks.h"
static void ohci_dump (struct ohci_hcd *ohci, int verbose);
static int ohci_init (struct ohci_hcd *ohci);
@@ -85,18 +86,8 @@ static int ohci_restart (struct ohci_hcd *ohci);
#endif
#ifdef CONFIG_PCI
-static void quirk_amd_pll(int state);
-static void amd_iso_dev_put(void);
static void sb800_prefetch(struct ohci_hcd *ohci, int on);
#else
-static inline void quirk_amd_pll(int state)
-{
- return;
-}
-static inline void amd_iso_dev_put(void)
-{
- return;
-}
static inline void sb800_prefetch(struct ohci_hcd *ohci, int on)
{
return;
@@ -912,7 +903,7 @@ static void ohci_stop (struct usb_hcd *hcd)
if (quirk_zfmicro(ohci))
del_timer(&ohci->unlink_watchdog);
if (quirk_amdiso(ohci))
- amd_iso_dev_put();
+ usb_amd_dev_put();
remove_debug_files (ohci);
ohci_mem_cleanup (ohci);
@@ -1068,10 +1059,7 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_hcd_da8xx_driver
#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_CPU_SUBTYPE_SH7763) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786)
+#ifdef CONFIG_USB_OHCI_SH
#include "ohci-sh.c"
#define PLATFORM_DRIVER ohci_hcd_sh_driver
#endif
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index cddcda95b57..9154615292d 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -580,15 +580,16 @@ ohci_hub_descriptor (
temp |= 0x0008;
desc->wHubCharacteristics = (__force __u16)cpu_to_hc16(ohci, temp);
- /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ /* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
rh = roothub_b (ohci);
- memset(desc->bitmap, 0xff, sizeof(desc->bitmap));
- desc->bitmap [0] = rh & RH_B_DR;
+ memset(desc->u.hs.DeviceRemovable, 0xff,
+ sizeof(desc->u.hs.DeviceRemovable));
+ desc->u.hs.DeviceRemovable[0] = rh & RH_B_DR;
if (ohci->num_ports > 7) {
- desc->bitmap [1] = (rh & RH_B_DR) >> 8;
- desc->bitmap [2] = 0xff;
+ desc->u.hs.DeviceRemovable[1] = (rh & RH_B_DR) >> 8;
+ desc->u.hs.DeviceRemovable[2] = 0xff;
} else
- desc->bitmap [1] = 0xff;
+ desc->u.hs.DeviceRemovable[1] = 0xff;
}
/*-------------------------------------------------------------------------*/
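
The desc->bitmap to desc->u.hs.* conversions here, and the u.ss.* accesses in the xhci hunk further down, rely on struct usb_hub_descriptor in include/linux/usb/ch11.h growing a union of high-speed and SuperSpeed tails. Roughly (field widths inferred from the usage in this diff; treat the exact layout as a sketch):

	struct usb_hub_descriptor {
		__u8   bDescLength;
		__u8   bDescriptorType;
		__u8   bNbrPorts;
		__le16 wHubCharacteristics;
		__u8   bPwrOn2PwrGood;
		__u8   bHubContrCurrent;
		union {
			struct {	/* USB 1.1/2.0 hubs: variable-length bitmaps */
				__u8 DeviceRemovable[(USB_MAXCHILDREN + 1 + 7) / 8];
				__u8 PortPwrCtrlMask[(USB_MAXCHILDREN + 1 + 7) / 8];
			} __attribute__ ((packed)) hs;
			struct {	/* USB 3.0 hubs: fixed 12-byte descriptor */
				__u8   bHubHdrDecLat;
				__le16 wHubDelay;
				__le16 DeviceRemovable;
			} __attribute__ ((packed)) ss;
		} u;
	} __attribute__ ((packed));
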
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index a37d5993e4e..6048f2f64f7 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -7,6 +7,7 @@
* Copyright (C) 2007-2010 Texas Instruments, Inc.
* Author: Vikram Pandita <vikram.pandita@ti.com>
* Author: Anand Gadiyar <gadiyar@ti.com>
+ * Author: Keshava Munegowda <keshava_mgowda@ti.com>
*
* Based on ehci-omap.c and some other ohci glue layers
*
@@ -24,150 +25,15 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
- * TODO (last updated Mar 10th, 2010):
+ * TODO (last updated Feb 27, 2011):
* - add kernel-doc
- * - Factor out code common to EHCI to a separate file
- * - Make EHCI and OHCI coexist together
- * - needs newer silicon versions to actually work
- * - the last one to be loaded currently steps on the other's toes
- * - Add hooks for configuring transceivers, etc. at init/exit
- * - Add aggressive clock-management code
*/
#include <linux/platform_device.h>
-#include <linux/clk.h>
-
#include <plat/usb.h>
-/*
- * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES
- * Use ohci_omap_readl()/ohci_omap_writel() functions
- */
-
-/* TLL Register Set */
-#define OMAP_USBTLL_REVISION (0x00)
-#define OMAP_USBTLL_SYSCONFIG (0x10)
-#define OMAP_USBTLL_SYSCONFIG_CACTIVITY (1 << 8)
-#define OMAP_USBTLL_SYSCONFIG_SIDLEMODE (1 << 3)
-#define OMAP_USBTLL_SYSCONFIG_ENAWAKEUP (1 << 2)
-#define OMAP_USBTLL_SYSCONFIG_SOFTRESET (1 << 1)
-#define OMAP_USBTLL_SYSCONFIG_AUTOIDLE (1 << 0)
-
-#define OMAP_USBTLL_SYSSTATUS (0x14)
-#define OMAP_USBTLL_SYSSTATUS_RESETDONE (1 << 0)
-
-#define OMAP_USBTLL_IRQSTATUS (0x18)
-#define OMAP_USBTLL_IRQENABLE (0x1C)
-
-#define OMAP_TLL_SHARED_CONF (0x30)
-#define OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN (1 << 6)
-#define OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN (1 << 5)
-#define OMAP_TLL_SHARED_CONF_USB_DIVRATION (1 << 2)
-#define OMAP_TLL_SHARED_CONF_FCLK_REQ (1 << 1)
-#define OMAP_TLL_SHARED_CONF_FCLK_IS_ON (1 << 0)
-
-#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num)
-#define OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT 24
-#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11)
-#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10)
-#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9)
-#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8)
-#define OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS (1 << 1)
-#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0)
-
-#define OMAP_TLL_CHANNEL_COUNT 3
-
-/* UHH Register Set */
-#define OMAP_UHH_REVISION (0x00)
-#define OMAP_UHH_SYSCONFIG (0x10)
-#define OMAP_UHH_SYSCONFIG_MIDLEMODE (1 << 12)
-#define OMAP_UHH_SYSCONFIG_CACTIVITY (1 << 8)
-#define OMAP_UHH_SYSCONFIG_SIDLEMODE (1 << 3)
-#define OMAP_UHH_SYSCONFIG_ENAWAKEUP (1 << 2)
-#define OMAP_UHH_SYSCONFIG_SOFTRESET (1 << 1)
-#define OMAP_UHH_SYSCONFIG_AUTOIDLE (1 << 0)
-
-#define OMAP_UHH_SYSSTATUS (0x14)
-#define OMAP_UHH_SYSSTATUS_UHHRESETDONE (1 << 0)
-#define OMAP_UHH_SYSSTATUS_OHCIRESETDONE (1 << 1)
-#define OMAP_UHH_SYSSTATUS_EHCIRESETDONE (1 << 2)
-#define OMAP_UHH_HOSTCONFIG (0x40)
-#define OMAP_UHH_HOSTCONFIG_ULPI_BYPASS (1 << 0)
-#define OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS (1 << 0)
-#define OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS (1 << 11)
-#define OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS (1 << 12)
-#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN (1 << 2)
-#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN (1 << 3)
-#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN (1 << 4)
-#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN (1 << 5)
-#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS (1 << 8)
-#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9)
-#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10)
-
-#define OMAP_UHH_DEBUG_CSR (0x44)
-
/*-------------------------------------------------------------------------*/
-static inline void ohci_omap_writel(void __iomem *base, u32 reg, u32 val)
-{
- __raw_writel(val, base + reg);
-}
-
-static inline u32 ohci_omap_readl(void __iomem *base, u32 reg)
-{
- return __raw_readl(base + reg);
-}
-
-static inline void ohci_omap_writeb(void __iomem *base, u8 reg, u8 val)
-{
- __raw_writeb(val, base + reg);
-}
-
-static inline u8 ohci_omap_readb(void __iomem *base, u8 reg)
-{
- return __raw_readb(base + reg);
-}
-
-/*-------------------------------------------------------------------------*/
-
-struct ohci_hcd_omap3 {
- struct ohci_hcd *ohci;
- struct device *dev;
-
- struct clk *usbhost_ick;
- struct clk *usbhost2_120m_fck;
- struct clk *usbhost1_48m_fck;
- struct clk *usbtll_fck;
- struct clk *usbtll_ick;
-
- /* port_mode: TLL/PHY, 2/3/4/6-PIN, DP-DM/DAT-SE0 */
- enum ohci_omap3_port_mode port_mode[OMAP3_HS_USB_PORTS];
- void __iomem *uhh_base;
- void __iomem *tll_base;
- void __iomem *ohci_base;
-
- unsigned es2_compatibility:1;
-};
-
-/*-------------------------------------------------------------------------*/
-
-static void ohci_omap3_clock_power(struct ohci_hcd_omap3 *omap, int on)
-{
- if (on) {
- clk_enable(omap->usbtll_ick);
- clk_enable(omap->usbtll_fck);
- clk_enable(omap->usbhost_ick);
- clk_enable(omap->usbhost1_48m_fck);
- clk_enable(omap->usbhost2_120m_fck);
- } else {
- clk_disable(omap->usbhost2_120m_fck);
- clk_disable(omap->usbhost1_48m_fck);
- clk_disable(omap->usbhost_ick);
- clk_disable(omap->usbtll_fck);
- clk_disable(omap->usbtll_ick);
- }
-}
-
static int ohci_omap3_init(struct usb_hcd *hcd)
{
dev_dbg(hcd->self.controller, "starting OHCI controller\n");
@@ -175,7 +41,6 @@ static int ohci_omap3_init(struct usb_hcd *hcd)
return ohci_init(hcd_to_ohci(hcd));
}
-
/*-------------------------------------------------------------------------*/
static int ohci_omap3_start(struct usb_hcd *hcd)
@@ -202,325 +67,6 @@ static int ohci_omap3_start(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-/*
- * convert the port-mode enum to a value we can use in the FSLSMODE
- * field of USBTLL_CHANNEL_CONF
- */
-static unsigned ohci_omap3_fslsmode(enum ohci_omap3_port_mode mode)
-{
- switch (mode) {
- case OMAP_OHCI_PORT_MODE_UNUSED:
- case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
- return 0x0;
-
- case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
- return 0x1;
-
- case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
- return 0x2;
-
- case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
- return 0x3;
-
- case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
- return 0x4;
-
- case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
- return 0x5;
-
- case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
- return 0x6;
-
- case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
- return 0x7;
-
- case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
- return 0xA;
-
- case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
- return 0xB;
- default:
- pr_warning("Invalid port mode, using default\n");
- return 0x0;
- }
-}
-
-static void ohci_omap3_tll_config(struct ohci_hcd_omap3 *omap)
-{
- u32 reg;
- int i;
-
- /* Program TLL SHARED CONF */
- reg = ohci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF);
- reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN;
- reg &= ~OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN;
- reg |= OMAP_TLL_SHARED_CONF_USB_DIVRATION;
- reg |= OMAP_TLL_SHARED_CONF_FCLK_IS_ON;
- ohci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);
-
- /* Program each TLL channel */
- /*
- * REVISIT: Only the 3-pin and 4-pin PHY modes have
- * actually been tested.
- */
- for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
-
- /* Enable only those channels that are actually used */
- if (omap->port_mode[i] == OMAP_OHCI_PORT_MODE_UNUSED)
- continue;
-
- reg = ohci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
- reg |= ohci_omap3_fslsmode(omap->port_mode[i])
- << OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT;
- reg |= OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS;
- reg |= OMAP_TLL_CHANNEL_CONF_CHANEN;
- ohci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
- }
-}
-
-/* omap3_start_ohci
- * - Start the TI USBHOST controller
- */
-static int omap3_start_ohci(struct ohci_hcd_omap3 *omap, struct usb_hcd *hcd)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
- u32 reg = 0;
- int ret = 0;
-
- dev_dbg(omap->dev, "starting TI OHCI USB Controller\n");
-
- /* Get all the clock handles we need */
- omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick");
- if (IS_ERR(omap->usbhost_ick)) {
- dev_err(omap->dev, "could not get usbhost_ick\n");
- ret = PTR_ERR(omap->usbhost_ick);
- goto err_host_ick;
- }
-
- omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck");
- if (IS_ERR(omap->usbhost2_120m_fck)) {
- dev_err(omap->dev, "could not get usbhost_120m_fck\n");
- ret = PTR_ERR(omap->usbhost2_120m_fck);
- goto err_host_120m_fck;
- }
-
- omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck");
- if (IS_ERR(omap->usbhost1_48m_fck)) {
- dev_err(omap->dev, "could not get usbhost_48m_fck\n");
- ret = PTR_ERR(omap->usbhost1_48m_fck);
- goto err_host_48m_fck;
- }
-
- omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck");
- if (IS_ERR(omap->usbtll_fck)) {
- dev_err(omap->dev, "could not get usbtll_fck\n");
- ret = PTR_ERR(omap->usbtll_fck);
- goto err_tll_fck;
- }
-
- omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick");
- if (IS_ERR(omap->usbtll_ick)) {
- dev_err(omap->dev, "could not get usbtll_ick\n");
- ret = PTR_ERR(omap->usbtll_ick);
- goto err_tll_ick;
- }
-
- /* Now enable all the clocks in the correct order */
- ohci_omap3_clock_power(omap, 1);
-
- /* perform TLL soft reset, and wait until reset is complete */
- ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_SOFTRESET);
-
- /* Wait for TLL reset to complete */
- while (!(ohci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
- cpu_relax();
-
- if (time_after(jiffies, timeout)) {
- dev_dbg(omap->dev, "operation timed out\n");
- ret = -EINVAL;
- goto err_sys_status;
- }
- }
-
- dev_dbg(omap->dev, "TLL reset done\n");
-
- /* (1<<3) = no idle mode only for initial debugging */
- ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
- OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
- OMAP_USBTLL_SYSCONFIG_CACTIVITY);
-
-
- /* Put UHH in NoIdle/NoStandby mode */
- reg = ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
- reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
- | OMAP_UHH_SYSCONFIG_SIDLEMODE
- | OMAP_UHH_SYSCONFIG_CACTIVITY
- | OMAP_UHH_SYSCONFIG_MIDLEMODE);
- reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
- reg &= ~OMAP_UHH_SYSCONFIG_SOFTRESET;
-
- ohci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
-
- reg = ohci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
-
- /* setup ULPI bypass and burst configurations */
- reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
- | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
- | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
- reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
-
- /*
- * REVISIT: Pi_CONNECT_STATUS controls MStandby
- * assertion and Swakeup generation - let us not
- * worry about this for now. OMAP HWMOD framework
- * might take care of this later. If not, we can
- * update these registers when adding aggressive
- * clock management code.
- *
- * For now, turn off all the Pi_CONNECT_STATUS bits
- *
- if (omap->port_mode[0] == OMAP_OHCI_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
- if (omap->port_mode[1] == OMAP_OHCI_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
- if (omap->port_mode[2] == OMAP_OHCI_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
- */
- reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
- reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
- reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
-
- if (omap->es2_compatibility) {
- /*
- * All OHCI modes need to go through the TLL,
- * unlike in the EHCI case. So use UTMI mode
- * for all ports for OHCI, on ES2.x silicon
- */
- dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
- } else {
- dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
- if (omap->port_mode[0] == OMAP_OHCI_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
-
- if (omap->port_mode[1] == OMAP_OHCI_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
-
- if (omap->port_mode[2] == OMAP_OHCI_PORT_MODE_UNUSED)
- reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
- else
- reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
-
- }
- ohci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
- dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
-
- ohci_omap3_tll_config(omap);
-
- return 0;
-
-err_sys_status:
- ohci_omap3_clock_power(omap, 0);
- clk_put(omap->usbtll_ick);
-
-err_tll_ick:
- clk_put(omap->usbtll_fck);
-
-err_tll_fck:
- clk_put(omap->usbhost1_48m_fck);
-
-err_host_48m_fck:
- clk_put(omap->usbhost2_120m_fck);
-
-err_host_120m_fck:
- clk_put(omap->usbhost_ick);
-
-err_host_ick:
- return ret;
-}
-
-static void omap3_stop_ohci(struct ohci_hcd_omap3 *omap, struct usb_hcd *hcd)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(100);
-
- dev_dbg(omap->dev, "stopping TI EHCI USB Controller\n");
-
- /* Reset USBHOST for insmod/rmmod to work */
- ohci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG,
- OMAP_UHH_SYSCONFIG_SOFTRESET);
- while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & OMAP_UHH_SYSSTATUS_UHHRESETDONE)) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & OMAP_UHH_SYSSTATUS_OHCIRESETDONE)) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & OMAP_UHH_SYSSTATUS_EHCIRESETDONE)) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
-
- while (!(ohci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & (1 << 0))) {
- cpu_relax();
-
- if (time_after(jiffies, timeout))
- dev_dbg(omap->dev, "operation timed out\n");
- }
-
- ohci_omap3_clock_power(omap, 0);
-
- if (omap->usbtll_fck != NULL) {
- clk_put(omap->usbtll_fck);
- omap->usbtll_fck = NULL;
- }
-
- if (omap->usbhost_ick != NULL) {
- clk_put(omap->usbhost_ick);
- omap->usbhost_ick = NULL;
- }
-
- if (omap->usbhost1_48m_fck != NULL) {
- clk_put(omap->usbhost1_48m_fck);
- omap->usbhost1_48m_fck = NULL;
- }
-
- if (omap->usbhost2_120m_fck != NULL) {
- clk_put(omap->usbhost2_120m_fck);
- omap->usbhost2_120m_fck = NULL;
- }
-
- if (omap->usbtll_ick != NULL) {
- clk_put(omap->usbtll_ick);
- omap->usbtll_ick = NULL;
- }
-
- dev_dbg(omap->dev, "Clock to USB host has been disabled\n");
-}
-
-/*-------------------------------------------------------------------------*/
-
static const struct hc_driver ohci_omap3_hc_driver = {
.description = hcd_name,
.product_desc = "OMAP3 OHCI Host Controller",
@@ -580,107 +126,77 @@ static const struct hc_driver ohci_omap3_hc_driver = {
*/
static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
{
- struct ohci_hcd_omap_platform_data *pdata = pdev->dev.platform_data;
- struct ohci_hcd_omap3 *omap;
- struct resource *res;
- struct usb_hcd *hcd;
- int ret = -ENODEV;
- int irq;
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd = NULL;
+ void __iomem *regs = NULL;
+ struct resource *res;
+ int ret = -ENODEV;
+ int irq;
if (usb_disabled())
- goto err_disabled;
+		return -ENODEV;
- if (!pdata) {
- dev_dbg(&pdev->dev, "missing platform_data\n");
- goto err_pdata;
+ if (!dev->parent) {
+ dev_err(dev, "Missing parent device\n");
+ return -ENODEV;
}
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_byname(pdev, "ohci-irq");
+ if (irq < 0) {
+ dev_err(dev, "OHCI irq failed\n");
+ return -ENODEV;
+ }
- omap = kzalloc(sizeof(*omap), GFP_KERNEL);
- if (!omap) {
- ret = -ENOMEM;
- goto err_disabled;
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "ohci");
+	if (!res) {
+ dev_err(dev, "UHH OHCI get resource failed\n");
+ return -ENOMEM;
}
- hcd = usb_create_hcd(&ohci_omap3_hc_driver, &pdev->dev,
- dev_name(&pdev->dev));
- if (!hcd) {
- ret = -ENOMEM;
- goto err_create_hcd;
+ regs = ioremap(res->start, resource_size(res));
+ if (!regs) {
+ dev_err(dev, "UHH OHCI ioremap failed\n");
+ return -ENOMEM;
}
- platform_set_drvdata(pdev, omap);
- omap->dev = &pdev->dev;
- omap->port_mode[0] = pdata->port_mode[0];
- omap->port_mode[1] = pdata->port_mode[1];
- omap->port_mode[2] = pdata->port_mode[2];
- omap->es2_compatibility = pdata->es2_compatibility;
- omap->ohci = hcd_to_ohci(hcd);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
+ dev_name(dev));
+ if (!hcd) {
+ dev_err(dev, "usb_create_hcd failed\n");
+ goto err_io;
+ }
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
+ hcd->regs = regs;
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&pdev->dev, "OHCI ioremap failed\n");
- ret = -ENOMEM;
- goto err_ioremap;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- omap->uhh_base = ioremap(res->start, resource_size(res));
- if (!omap->uhh_base) {
- dev_err(&pdev->dev, "UHH ioremap failed\n");
- ret = -ENOMEM;
- goto err_uhh_ioremap;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- omap->tll_base = ioremap(res->start, resource_size(res));
- if (!omap->tll_base) {
- dev_err(&pdev->dev, "TLL ioremap failed\n");
- ret = -ENOMEM;
- goto err_tll_ioremap;
- }
-
- ret = omap3_start_ohci(omap, hcd);
+ ret = omap_usbhs_enable(dev);
if (ret) {
- dev_dbg(&pdev->dev, "failed to start ohci\n");
- goto err_start;
+ dev_dbg(dev, "failed to start ohci\n");
+ goto err_end;
}
- ohci_hcd_init(omap->ohci);
+ ohci_hcd_init(hcd_to_ohci(hcd));
ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
if (ret) {
- dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+ dev_dbg(dev, "failed to add hcd with err %d\n", ret);
goto err_add_hcd;
}
return 0;
err_add_hcd:
- omap3_stop_ohci(omap, hcd);
-
-err_start:
- iounmap(omap->tll_base);
-
-err_tll_ioremap:
- iounmap(omap->uhh_base);
-
-err_uhh_ioremap:
- iounmap(hcd->regs);
+ omap_usbhs_disable(dev);
-err_ioremap:
+err_end:
usb_put_hcd(hcd);
-err_create_hcd:
- kfree(omap);
-err_pdata:
-err_disabled:
+err_io:
+ iounmap(regs);
+
return ret;
}
@@ -699,24 +215,20 @@ err_disabled:
*/
static int __devexit ohci_hcd_omap3_remove(struct platform_device *pdev)
{
- struct ohci_hcd_omap3 *omap = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ohci_to_hcd(omap->ohci);
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
- usb_remove_hcd(hcd);
- omap3_stop_ohci(omap, hcd);
iounmap(hcd->regs);
- iounmap(omap->tll_base);
- iounmap(omap->uhh_base);
+ usb_remove_hcd(hcd);
+ omap_usbhs_disable(dev);
usb_put_hcd(hcd);
- kfree(omap);
return 0;
}
static void ohci_hcd_omap3_shutdown(struct platform_device *pdev)
{
- struct ohci_hcd_omap3 *omap = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ohci_to_hcd(omap->ohci);
+ struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 36ee9a666e9..d84d6f0314f 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -22,24 +22,6 @@
#include <linux/io.h>
-/* constants used to work around PM-related transfer
- * glitches in some AMD 700 series southbridges
- */
-#define AB_REG_BAR 0xf0
-#define AB_INDX(addr) ((addr) + 0x00)
-#define AB_DATA(addr) ((addr) + 0x04)
-#define AX_INDXC 0X30
-#define AX_DATAC 0x34
-
-#define NB_PCIE_INDX_ADDR 0xe0
-#define NB_PCIE_INDX_DATA 0xe4
-#define PCIE_P_CNTL 0x10040
-#define BIF_NB 0x10002
-
-static struct pci_dev *amd_smbus_dev;
-static struct pci_dev *amd_hb_dev;
-static int amd_ohci_iso_count;
-
/*-------------------------------------------------------------------------*/
static int broken_suspend(struct usb_hcd *hcd)
@@ -168,15 +150,18 @@ static int ohci_quirk_nec(struct usb_hcd *hcd)
static int ohci_quirk_amd700(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- u8 rev = 0;
+ struct pci_dev *amd_smbus_dev;
+ u8 rev;
- if (!amd_smbus_dev)
- amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
- PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
+ if (usb_amd_find_chipset_info())
+ ohci->flags |= OHCI_QUIRK_AMD_PLL;
+
+ amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
+ PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
if (!amd_smbus_dev)
return 0;
- pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+ rev = amd_smbus_dev->revision;
/* SB800 needs pre-fetch fix */
if ((rev >= 0x40) && (rev <= 0x4f)) {
@@ -184,19 +169,8 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
}
- if ((rev > 0x3b) || (rev < 0x30)) {
- pci_dev_put(amd_smbus_dev);
- amd_smbus_dev = NULL;
- return 0;
- }
-
- amd_ohci_iso_count++;
-
- if (!amd_hb_dev)
- amd_hb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9600, NULL);
-
- ohci->flags |= OHCI_QUIRK_AMD_ISO;
- ohci_dbg(ohci, "enabled AMD ISO transfers quirk\n");
+ pci_dev_put(amd_smbus_dev);
+ amd_smbus_dev = NULL;
return 0;
}
@@ -215,74 +189,6 @@ static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
return 0;
}
-/*
- * The hardware normally enables the A-link power management feature, which
- * lets the system lower the power consumption in idle states.
- *
- * Assume the system is configured to have USB 1.1 ISO transfers going
- * to or from a USB device. Without this quirk, that stream may stutter
- * or have breaks occasionally. For transfers going to speakers, this
- * makes a very audible mess...
- *
- * That audio playback corruption is due to the audio stream getting
- * interrupted occasionally when the link goes in lower power state
- * This USB quirk prevents the link going into that lower power state
- * during audio playback or other ISO operations.
- */
-static void quirk_amd_pll(int on)
-{
- u32 addr;
- u32 val;
- u32 bit = (on > 0) ? 1 : 0;
-
- pci_read_config_dword(amd_smbus_dev, AB_REG_BAR, &addr);
-
- /* BIT names/meanings are NDA-protected, sorry ... */
-
- outl(AX_INDXC, AB_INDX(addr));
- outl(0x40, AB_DATA(addr));
- outl(AX_DATAC, AB_INDX(addr));
- val = inl(AB_DATA(addr));
- val &= ~((1 << 3) | (1 << 4) | (1 << 9));
- val |= (bit << 3) | ((!bit) << 4) | ((!bit) << 9);
- outl(val, AB_DATA(addr));
-
- if (amd_hb_dev) {
- addr = PCIE_P_CNTL;
- pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
-
- pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
- val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
- val |= bit | (bit << 3) | (bit << 12);
- val |= ((!bit) << 4) | ((!bit) << 9);
- pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
-
- addr = BIF_NB;
- pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
-
- pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
- val &= ~(1 << 8);
- val |= bit << 8;
- pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
- }
-}
-
-static void amd_iso_dev_put(void)
-{
- amd_ohci_iso_count--;
- if (amd_ohci_iso_count == 0) {
- if (amd_smbus_dev) {
- pci_dev_put(amd_smbus_dev);
- amd_smbus_dev = NULL;
- }
- if (amd_hb_dev) {
- pci_dev_put(amd_hb_dev);
- amd_hb_dev = NULL;
- }
- }
-
-}
-
static void sb800_prefetch(struct ohci_hcd *ohci, int on)
{
struct pci_dev *pdev;
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 83094d067e0..dd24fc115e4 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -52,7 +52,7 @@ __acquires(ohci->lock)
ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
if (quirk_amdiso(ohci))
- quirk_amd_pll(1);
+ usb_amd_quirk_pll_enable();
if (quirk_amdprefetch(ohci))
sb800_prefetch(ohci, 0);
}
@@ -686,7 +686,7 @@ static void td_submit_urb (
}
if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
if (quirk_amdiso(ohci))
- quirk_amd_pll(0);
+ usb_amd_quirk_pll_disable();
if (quirk_amdprefetch(ohci))
sb800_prefetch(ohci, 1);
}
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index 8dabe8e31d8..3558491dd87 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -185,7 +185,7 @@ static struct platform_driver ohci_hcd_tmio_driver;
static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct resource *regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
struct resource *config = platform_get_resource(dev, IORESOURCE_MEM, 1);
struct resource *sram = platform_get_resource(dev, IORESOURCE_MEM, 2);
@@ -274,7 +274,7 @@ static int __devexit ohci_hcd_tmio_drv_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct tmio_hcd *tmio = hcd_to_tmio(hcd);
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
usb_remove_hcd(hcd);
tmio_stop_hc(dev);
@@ -293,7 +293,7 @@ static int __devexit ohci_hcd_tmio_drv_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct tmio_hcd *tmio = hcd_to_tmio(hcd);
@@ -326,7 +326,7 @@ static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t s
static int ohci_hcd_tmio_drv_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
struct tmio_hcd *tmio = hcd_to_tmio(hcd);
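
The ohci-tmio.c hunks stop reading the cell out of dev.platform_data and fetch it through mfd_get_cell() instead; the helper lives in include/linux/mfd/core.h and simply hands back the cell that the MFD core associated with the platform device. Roughly (treat the body as an assumption):

	/* Assumed shape of the helper used above; see include/linux/mfd/core.h. */
	static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
	{
		return pdev->mfd_cell;
	}
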
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 51facb985c8..bad11a72c20 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -401,7 +401,7 @@ struct ohci_hcd {
#define OHCI_QUIRK_NEC 0x40 /* lost interrupts */
#define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */
#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
-#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/
+#define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/
#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
#define OHCI_QUIRK_SHUTDOWN 0x800 /* nVidia power bug */
// there are also chip quirks/bugs in init logic
@@ -433,7 +433,7 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci)
}
static inline int quirk_amdiso(struct ohci_hcd *ohci)
{
- return ohci->flags & OHCI_QUIRK_AMD_ISO;
+ return ohci->flags & OHCI_QUIRK_AMD_PLL;
}
static inline int quirk_amdprefetch(struct ohci_hcd *ohci)
{
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index e0cb12b573f..38193f4e980 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -451,9 +451,9 @@ static void ehci_hub_descriptor(struct oxu_hcd *oxu,
temp = 1 + (ports / 8);
desc->bDescLength = 7 + 2 * temp;
- /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
- memset(&desc->bitmap[0], 0, temp);
- memset(&desc->bitmap[temp], 0xff, temp);
+ /* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
temp = 0x0008; /* per-port overcurrent reporting */
if (HCS_PPC(oxu->hcs_params))
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 4c502c890eb..1d586d4f7b5 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -52,6 +52,264 @@
#define EHCI_USBLEGCTLSTS 4 /* legacy control/status */
#define EHCI_USBLEGCTLSTS_SOOE (1 << 13) /* SMI on ownership change */
+/* Register addresses and state used by the AMD PLL quirk */
+#define AB_REG_BAR_LOW 0xe0
+#define AB_REG_BAR_HIGH 0xe1
+#define AB_REG_BAR_SB700 0xf0
+#define AB_INDX(addr) ((addr) + 0x00)
+#define AB_DATA(addr) ((addr) + 0x04)
+#define AX_INDXC 0x30
+#define AX_DATAC 0x34
+
+#define NB_PCIE_INDX_ADDR 0xe0
+#define NB_PCIE_INDX_DATA 0xe4
+#define PCIE_P_CNTL 0x10040
+#define BIF_NB 0x10002
+#define NB_PIF0_PWRDOWN_0 0x01100012
+#define NB_PIF0_PWRDOWN_1 0x01100013
+
+static struct amd_chipset_info {
+ struct pci_dev *nb_dev;
+ struct pci_dev *smbus_dev;
+ int nb_type;
+ int sb_type;
+ int isoc_reqs;
+ int probe_count;
+ int probe_result;
+} amd_chipset;
+
+static DEFINE_SPINLOCK(amd_lock);
+
+int usb_amd_find_chipset_info(void)
+{
+ u8 rev = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&amd_lock, flags);
+
+ amd_chipset.probe_count++;
+ /* probe only once */
+ if (amd_chipset.probe_count > 1) {
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return amd_chipset.probe_result;
+ }
+
+ amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
+ if (amd_chipset.smbus_dev) {
+ rev = amd_chipset.smbus_dev->revision;
+ if (rev >= 0x40)
+ amd_chipset.sb_type = 1;
+ else if (rev >= 0x30 && rev <= 0x3b)
+ amd_chipset.sb_type = 3;
+ } else {
+ amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 0x780b, NULL);
+ if (!amd_chipset.smbus_dev) {
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return 0;
+ }
+ rev = amd_chipset.smbus_dev->revision;
+ if (rev >= 0x11 && rev <= 0x18)
+ amd_chipset.sb_type = 2;
+ }
+
+ if (amd_chipset.sb_type == 0) {
+ if (amd_chipset.smbus_dev) {
+ pci_dev_put(amd_chipset.smbus_dev);
+ amd_chipset.smbus_dev = NULL;
+ }
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return 0;
+ }
+
+ amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
+ if (amd_chipset.nb_dev) {
+ amd_chipset.nb_type = 1;
+ } else {
+ amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 0x1510, NULL);
+ if (amd_chipset.nb_dev) {
+ amd_chipset.nb_type = 2;
+ } else {
+ amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 0x9600, NULL);
+ if (amd_chipset.nb_dev)
+ amd_chipset.nb_type = 3;
+ }
+ }
+
+ amd_chipset.probe_result = 1;
+ printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
+
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return amd_chipset.probe_result;
+}
+EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
+
+/*
+ * The hardware normally enables the A-link power management feature, which
+ * lets the system lower the power consumption in idle states.
+ *
+ * This USB quirk prevents the link going into that lower power state
+ * during isochronous transfers.
+ *
+ * Without this quirk, isochronous streams on OHCI/EHCI/xHCI controllers of
+ * some AMD platforms may stutter or break up occasionally.
+ */
+static void usb_amd_quirk_pll(int disable)
+{
+ u32 addr, addr_low, addr_high, val;
+ u32 bit = disable ? 0 : 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&amd_lock, flags);
+
+ if (disable) {
+ amd_chipset.isoc_reqs++;
+ if (amd_chipset.isoc_reqs > 1) {
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return;
+ }
+ } else {
+ amd_chipset.isoc_reqs--;
+ if (amd_chipset.isoc_reqs > 0) {
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return;
+ }
+ }
+
+ if (amd_chipset.sb_type == 1 || amd_chipset.sb_type == 2) {
+ outb_p(AB_REG_BAR_LOW, 0xcd6);
+ addr_low = inb_p(0xcd7);
+ outb_p(AB_REG_BAR_HIGH, 0xcd6);
+ addr_high = inb_p(0xcd7);
+ addr = addr_high << 8 | addr_low;
+
+ outl_p(0x30, AB_INDX(addr));
+ outl_p(0x40, AB_DATA(addr));
+ outl_p(0x34, AB_INDX(addr));
+ val = inl_p(AB_DATA(addr));
+ } else if (amd_chipset.sb_type == 3) {
+ pci_read_config_dword(amd_chipset.smbus_dev,
+ AB_REG_BAR_SB700, &addr);
+ outl(AX_INDXC, AB_INDX(addr));
+ outl(0x40, AB_DATA(addr));
+ outl(AX_DATAC, AB_INDX(addr));
+ val = inl(AB_DATA(addr));
+ } else {
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return;
+ }
+
+ if (disable) {
+ val &= ~0x08;
+ val |= (1 << 4) | (1 << 9);
+ } else {
+ val |= 0x08;
+ val &= ~((1 << 4) | (1 << 9));
+ }
+ outl_p(val, AB_DATA(addr));
+
+ if (!amd_chipset.nb_dev) {
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return;
+ }
+
+ if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
+ addr = PCIE_P_CNTL;
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_ADDR, addr);
+ pci_read_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, &val);
+
+ val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
+ val |= bit | (bit << 3) | (bit << 12);
+ val |= ((!bit) << 4) | ((!bit) << 9);
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, val);
+
+ addr = BIF_NB;
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_ADDR, addr);
+ pci_read_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, &val);
+ val &= ~(1 << 8);
+ val |= bit << 8;
+
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, val);
+ } else if (amd_chipset.nb_type == 2) {
+ addr = NB_PIF0_PWRDOWN_0;
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_ADDR, addr);
+ pci_read_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, &val);
+ if (disable)
+ val &= ~(0x3f << 7);
+ else
+ val |= 0x3f << 7;
+
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, val);
+
+ addr = NB_PIF0_PWRDOWN_1;
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_ADDR, addr);
+ pci_read_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, &val);
+ if (disable)
+ val &= ~(0x3f << 7);
+ else
+ val |= 0x3f << 7;
+
+ pci_write_config_dword(amd_chipset.nb_dev,
+ NB_PCIE_INDX_DATA, val);
+ }
+
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return;
+}
+
+void usb_amd_quirk_pll_disable(void)
+{
+ usb_amd_quirk_pll(1);
+}
+EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
+
+void usb_amd_quirk_pll_enable(void)
+{
+ usb_amd_quirk_pll(0);
+}
+EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
+
+void usb_amd_dev_put(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&amd_lock, flags);
+
+ amd_chipset.probe_count--;
+ if (amd_chipset.probe_count > 0) {
+ spin_unlock_irqrestore(&amd_lock, flags);
+ return;
+ }
+
+ if (amd_chipset.nb_dev) {
+ pci_dev_put(amd_chipset.nb_dev);
+ amd_chipset.nb_dev = NULL;
+ }
+ if (amd_chipset.smbus_dev) {
+ pci_dev_put(amd_chipset.smbus_dev);
+ amd_chipset.smbus_dev = NULL;
+ }
+ amd_chipset.nb_type = 0;
+ amd_chipset.sb_type = 0;
+ amd_chipset.isoc_reqs = 0;
+ amd_chipset.probe_result = 0;
+
+ spin_unlock_irqrestore(&amd_lock, flags);
+}
+EXPORT_SYMBOL_GPL(usb_amd_dev_put);
/*
* Make sure the controller is completely inactive, unable to
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 1564edfff6f..6ae9f78e993 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -1,7 +1,17 @@
#ifndef __LINUX_USB_PCI_QUIRKS_H
#define __LINUX_USB_PCI_QUIRKS_H
+#ifdef CONFIG_PCI
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
+int usb_amd_find_chipset_info(void);
+void usb_amd_dev_put(void);
+void usb_amd_quirk_pll_disable(void);
+void usb_amd_quirk_pll_enable(void);
+#else
+static inline void usb_amd_quirk_pll_disable(void) {}
+static inline void usb_amd_quirk_pll_enable(void) {}
+static inline void usb_amd_dev_put(void) {}
+#endif /* CONFIG_PCI */
#endif /* __LINUX_USB_PCI_QUIRKS_H */
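
The header above exports the shared AMD quirk entry points. A sketch of the calling convention, mirroring the OHCI hunks earlier in this diff (the example_* function names are illustrative, not kernel symbols):

	/* Probe time: detect the affected chipset once and cache the result. */
	static void example_probe_quirks(struct ohci_hcd *ohci)
	{
		if (usb_amd_find_chipset_info())
			ohci->flags |= OHCI_QUIRK_AMD_PLL;
	}

	/* An isochronous stream starts: keep the A-link out of low-power states. */
	static void example_iso_start(struct ohci_hcd *ohci)
	{
		if (quirk_amdiso(ohci))
			usb_amd_quirk_pll_disable();
	}

	/* The last isochronous stream finished: allow low-power states again. */
	static void example_iso_stop(struct ohci_hcd *ohci)
	{
		if (quirk_amdiso(ohci))
			usb_amd_quirk_pll_enable();
	}

	/* Controller teardown: drop the PCI device references taken at probe. */
	static void example_stop(struct ohci_hcd *ohci)
	{
		if (quirk_amdiso(ohci))
			usb_amd_dev_put();
	}
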
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 3076b1cc05d..db6f8b9c19b 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2150,8 +2150,9 @@ static void r8a66597_hub_descriptor(struct r8a66597 *r8a66597,
desc->bDescLength = 9;
desc->bPwrOn2PwrGood = 0;
desc->wHubCharacteristics = cpu_to_le16(0x0011);
- desc->bitmap[0] = ((1 << r8a66597->max_root_hub) - 1) << 1;
- desc->bitmap[1] = ~0;
+ desc->u.hs.DeviceRemovable[0] =
+ ((1 << r8a66597->max_root_hub) - 1) << 1;
+ desc->u.hs.DeviceRemovable[1] = ~0;
}
static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 2e9602a10e9..18b7099a812 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1111,9 +1111,9 @@ sl811h_hub_descriptor (
desc->wHubCharacteristics = cpu_to_le16(temp);
- /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
- desc->bitmap[0] = 0 << 1;
- desc->bitmap[1] = ~0;
+ /* ports removable, and legacy PortPwrCtrlMask */
+ desc->u.hs.DeviceRemovable[0] = 0 << 1;
+ desc->u.hs.DeviceRemovable[1] = ~0;
}
static void
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index fab764946c7..b4785934e09 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -2604,13 +2604,14 @@ static int u132_roothub_descriptor(struct u132 *u132,
retval = u132_read_pcimem(u132, roothub.b, &rh_b);
if (retval)
return retval;
- memset(desc->bitmap, 0xff, sizeof(desc->bitmap));
- desc->bitmap[0] = rh_b & RH_B_DR;
+ memset(desc->u.hs.DeviceRemovable, 0xff,
+ sizeof(desc->u.hs.DeviceRemovable));
+ desc->u.hs.DeviceRemovable[0] = rh_b & RH_B_DR;
if (u132->num_ports > 7) {
- desc->bitmap[1] = (rh_b & RH_B_DR) >> 8;
- desc->bitmap[2] = 0xff;
+ desc->u.hs.DeviceRemovable[1] = (rh_b & RH_B_DR) >> 8;
+ desc->u.hs.DeviceRemovable[2] = 0xff;
} else
- desc->bitmap[1] = 0xff;
+ desc->u.hs.DeviceRemovable[1] = 0xff;
return 0;
}
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index cee867829ec..4f65b14e5e0 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -471,7 +471,7 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
/*
* Store the current frame number in uhci->frame_number if the controller
- * is runnning. Expand from 11 bits (of which we use only 10) to a
+ * is running. Expand from 11 bits (of which we use only 10) to a
* full-sized integer.
*
* Like many other parts of the driver, this code relies on being polled
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 78c4edac1db..ce5c9e51748 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -19,8 +19,8 @@
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* Up to 16 microframes to halt an HC - one microframe is 125 microsectonds */
-#define XHCI_MAX_HALT_USEC (16*125)
+/* Up to 16 ms to halt an HC */
+#define XHCI_MAX_HALT_USEC (16*1000)
/* HC not running - set to 1 when run/stop bit is cleared. */
#define XHCI_STS_HALT (1<<0)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 5d963e35049..a78f2ebd11b 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -28,27 +28,15 @@
#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
PORT_RC | PORT_PLC | PORT_PE)
-static void xhci_hub_descriptor(struct xhci_hcd *xhci,
- struct usb_hub_descriptor *desc)
+static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
+ struct usb_hub_descriptor *desc, int ports)
{
- int ports;
u16 temp;
- ports = HCS_MAX_PORTS(xhci->hcs_params1);
-
- /* USB 3.0 hubs have a different descriptor, but we fake this for now */
- desc->bDescriptorType = 0x29;
desc->bPwrOn2PwrGood = 10; /* xhci section 5.4.9 says 20ms max */
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ports;
- temp = 1 + (ports / 8);
- desc->bDescLength = 7 + 2 * temp;
-
- /* Why does core/hcd.h define bitmap? It's just confusing. */
- memset(&desc->DeviceRemovable[0], 0, temp);
- memset(&desc->DeviceRemovable[temp], 0xff, temp);
-
/* Ugh, these should be #defines, FIXME */
/* Using table 11-13 in USB 2.0 spec. */
temp = 0;
@@ -65,14 +53,108 @@ static void xhci_hub_descriptor(struct xhci_hcd *xhci,
desc->wHubCharacteristics = (__force __u16) cpu_to_le16(temp);
}
+/* Fill in the USB 2.0 roothub descriptor */
+static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ struct usb_hub_descriptor *desc)
+{
+ int ports;
+ u16 temp;
+ __u8 port_removable[(USB_MAXCHILDREN + 1 + 7) / 8];
+ u32 portsc;
+ unsigned int i;
+
+ ports = xhci->num_usb2_ports;
+
+ xhci_common_hub_descriptor(xhci, desc, ports);
+ desc->bDescriptorType = 0x29;
+ temp = 1 + (ports / 8);
+ desc->bDescLength = 7 + 2 * temp;
+
+ /* The Device Removable bits are reported on a byte granularity.
+ * If the port doesn't exist within that byte, the bit is set to 0.
+ */
+ memset(port_removable, 0, sizeof(port_removable));
+ for (i = 0; i < ports; i++) {
+		portsc = xhci_readl(xhci, xhci->usb2_ports[i]);
+ /* If a device is removable, PORTSC reports a 0, same as in the
+ * hub descriptor DeviceRemovable bits.
+ */
+ if (portsc & PORT_DEV_REMOVE)
+ /* This math is hairy because bit 0 of DeviceRemovable
+ * is reserved, and bit 1 is for port 1, etc.
+ */
+ port_removable[(i + 1) / 8] |= 1 << ((i + 1) % 8);
+ }
+
+ /* ch11.h defines a hub descriptor that has room for USB_MAXCHILDREN
+ * ports on it. The USB 2.0 specification says that there are two
+ * variable length fields at the end of the hub descriptor:
+	 * DeviceRemovable and PortPwrCtrlMask. But since we can have fewer than
+	 * USB_MAXCHILDREN ports, we may need to use the DeviceRemovable array
+	 * to set PortPwrCtrlMask bits. PortPwrCtrlMask must always be set to
+	 * 0xFF, so we initialize both arrays (DeviceRemovable and
+ * PortPwrCtrlMask) to 0xFF. Then we set the DeviceRemovable for each
+ * set of ports that actually exist.
+ */
+ memset(desc->u.hs.DeviceRemovable, 0xff,
+ sizeof(desc->u.hs.DeviceRemovable));
+ memset(desc->u.hs.PortPwrCtrlMask, 0xff,
+ sizeof(desc->u.hs.PortPwrCtrlMask));
+
+ for (i = 0; i < (ports + 1 + 7) / 8; i++)
+ memset(&desc->u.hs.DeviceRemovable[i], port_removable[i],
+ sizeof(__u8));
+}
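
/*
 * Worked example of the DeviceRemovable math above (illustrative helper,
 * not part of the patch): bit 0 of the bitmap is reserved and bit 1 is
 * port 1, so 0-based port index i lands in byte (i + 1) / 8, bit (i + 1) % 8.
 * E.g. the eighth port (i = 7) sets byte 1, bit 0.
 */
static void mark_port_removable(u8 *device_removable, unsigned int i)
{
	device_removable[(i + 1) / 8] |= 1 << ((i + 1) % 8);
}
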
+
+/* Fill in the USB 3.0 roothub descriptor */
+static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ struct usb_hub_descriptor *desc)
+{
+ int ports;
+ u16 port_removable;
+ u32 portsc;
+ unsigned int i;
+
+ ports = xhci->num_usb3_ports;
+ xhci_common_hub_descriptor(xhci, desc, ports);
+ desc->bDescriptorType = 0x2a;
+ desc->bDescLength = 12;
+
+ /* header decode latency should be zero for roothubs,
+ * see section 4.23.5.2.
+ */
+ desc->u.ss.bHubHdrDecLat = 0;
+ desc->u.ss.wHubDelay = 0;
+
+ port_removable = 0;
+ /* bit 0 is reserved, bit 1 is for port 1, etc. */
+ for (i = 0; i < ports; i++) {
+ portsc = xhci_readl(xhci, xhci->usb3_ports[i]);
+ if (portsc & PORT_DEV_REMOVE)
+ port_removable |= 1 << (i + 1);
+ }
+	desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable);
+}
+
+static void xhci_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ struct usb_hub_descriptor *desc)
+{
+
+ if (hcd->speed == HCD_USB3)
+ xhci_usb3_hub_descriptor(hcd, xhci, desc);
+ else
+ xhci_usb2_hub_descriptor(hcd, xhci, desc);
+
+}
+
static unsigned int xhci_port_speed(unsigned int port_status)
{
if (DEV_LOWSPEED(port_status))
return USB_PORT_STAT_LOW_SPEED;
if (DEV_HIGHSPEED(port_status))
return USB_PORT_STAT_HIGH_SPEED;
- if (DEV_SUPERSPEED(port_status))
- return USB_PORT_STAT_SUPER_SPEED;
/*
* FIXME: Yes, we should check for full speed, but the core uses that as
* a default in portspeed() in usb/core/hub.c (which is the only place
@@ -135,17 +217,22 @@ u32 xhci_port_state_to_neutral(u32 state)
/*
* find slot id based on port number.
+ * @port: The one-based port number from one of the two split roothubs.
*/
-int xhci_find_slot_id_by_port(struct xhci_hcd *xhci, u16 port)
+int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ u16 port)
{
int slot_id;
int i;
+ enum usb_device_speed speed;
slot_id = 0;
for (i = 0; i < MAX_HC_SLOTS; i++) {
if (!xhci->devs[i])
continue;
- if (xhci->devs[i]->port == port) {
+ speed = xhci->devs[i]->udev->speed;
+ if (((speed == USB_SPEED_SUPER) == (hcd->speed == HCD_USB3))
+ && xhci->devs[i]->port == port) {
slot_id = i;
break;
}
@@ -226,11 +313,11 @@ void xhci_ring_device(struct xhci_hcd *xhci, int slot_id)
return;
}
-static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex,
- u32 __iomem *addr, u32 port_status)
+static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ u16 wIndex, u32 __iomem *addr, u32 port_status)
{
/* Don't allow the USB core to disable SuperSpeed ports. */
- if (xhci->port_array[wIndex] == 0x03) {
+ if (hcd->speed == HCD_USB3) {
xhci_dbg(xhci, "Ignoring request to disable "
"SuperSpeed port.\n");
return;
@@ -289,10 +376,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
unsigned long flags;
u32 temp, temp1, status;
int retval = 0;
- u32 __iomem *addr;
+ u32 __iomem **port_array;
int slot_id;
-
- ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ struct xhci_bus_state *bus_state;
+
+ if (hcd->speed == HCD_USB3) {
+ ports = xhci->num_usb3_ports;
+ port_array = xhci->usb3_ports;
+ } else {
+ ports = xhci->num_usb2_ports;
+ port_array = xhci->usb2_ports;
+ }
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
spin_lock_irqsave(&xhci->lock, flags);
switch (typeReq) {
@@ -301,17 +396,35 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
memset(buf, 0, 4);
break;
case GetHubDescriptor:
- xhci_hub_descriptor(xhci, (struct usb_hub_descriptor *) buf);
+ /* Check to make sure userspace is asking for the USB 3.0 hub
+ * descriptor for the USB 3.0 roothub. If not, we stall the
+ * endpoint, like external hubs do.
+ */
+ if (hcd->speed == HCD_USB3 &&
+ (wLength < USB_DT_SS_HUB_SIZE ||
+ wValue != (USB_DT_SS_HUB << 8))) {
+ xhci_dbg(xhci, "Wrong hub descriptor type for "
+ "USB 3.0 roothub.\n");
+ goto error;
+ }
+ xhci_hub_descriptor(hcd, xhci,
+ (struct usb_hub_descriptor *) buf);
break;
case GetPortStatus:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
status = 0;
- addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
- temp = xhci_readl(xhci, addr);
+ temp = xhci_readl(xhci, port_array[wIndex]);
+ if (temp == 0xffffffff) {
+ retval = -ENODEV;
+ break;
+ }
xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", wIndex, temp);
+ /* FIXME - should we return a port status value like the USB
+ * 3.0 external hubs do?
+ */
/* wPortChange bits */
if (temp & PORT_CSC)
status |= USB_PORT_STAT_C_CONNECTION << 16;
@@ -330,38 +443,33 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if ((temp & PORT_RESET) || !(temp & PORT_PE))
goto error;
if (!DEV_SUPERSPEED(temp) && time_after_eq(jiffies,
- xhci->resume_done[wIndex])) {
+ bus_state->resume_done[wIndex])) {
xhci_dbg(xhci, "Resume USB2 port %d\n",
wIndex + 1);
- xhci->resume_done[wIndex] = 0;
+ bus_state->resume_done[wIndex] = 0;
temp1 = xhci_port_state_to_neutral(temp);
temp1 &= ~PORT_PLS_MASK;
temp1 |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp1, addr);
+ xhci_writel(xhci, temp1, port_array[wIndex]);
xhci_dbg(xhci, "set port %d resume\n",
wIndex + 1);
- slot_id = xhci_find_slot_id_by_port(xhci,
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
wIndex + 1);
if (!slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
goto error;
}
xhci_ring_device(xhci, slot_id);
- xhci->port_c_suspend[wIndex >> 5] |=
- 1 << (wIndex & 31);
- xhci->suspended_ports[wIndex >> 5] &=
- ~(1 << (wIndex & 31));
+ bus_state->port_c_suspend |= 1 << wIndex;
+ bus_state->suspended_ports &= ~(1 << wIndex);
}
}
if ((temp & PORT_PLS_MASK) == XDEV_U0
&& (temp & PORT_POWER)
- && (xhci->suspended_ports[wIndex >> 5] &
- (1 << (wIndex & 31)))) {
- xhci->suspended_ports[wIndex >> 5] &=
- ~(1 << (wIndex & 31));
- xhci->port_c_suspend[wIndex >> 5] |=
- 1 << (wIndex & 31);
+ && (bus_state->suspended_ports & (1 << wIndex))) {
+ bus_state->suspended_ports &= ~(1 << wIndex);
+ bus_state->port_c_suspend |= 1 << wIndex;
}
if (temp & PORT_CONNECT) {
status |= USB_PORT_STAT_CONNECTION;
@@ -375,7 +483,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
status |= USB_PORT_STAT_POWER;
- if (xhci->port_c_suspend[wIndex >> 5] & (1 << (wIndex & 31)))
+ if (bus_state->port_c_suspend & (1 << wIndex))
status |= 1 << USB_PORT_FEAT_C_SUSPEND;
xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
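
The hunk above replaces the old eight-word port_c_suspend/suspended_ports arrays (indexed with wIndex >> 5 and wIndex & 31) with a single per-bus u32 in the new bus_state, which fits because each split roothub has at most 31 USB 2.0 ports or 15 USB 3.0 ports. The standalone C sketch below is illustration only, not part of the patch; old_set(), old_test() and the array sizes are invented for the demonstration.

/* Illustrative userspace sketch: the old scheme kept one bit per hardware
 * port in an array of u32 words, the new per-bus scheme fits in a single
 * u32 because a split roothub never exposes more than 31 ports.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define OLD_WORDS 8	/* mimics the old u32 port_c_suspend[8] arrays */

static void old_set(uint32_t *words, unsigned port)
{
	words[port >> 5] |= 1u << (port & 31);
}

static int old_test(const uint32_t *words, unsigned port)
{
	return !!(words[port >> 5] & (1u << (port & 31)));
}

int main(void)
{
	uint32_t old_map[OLD_WORDS] = { 0 };
	uint32_t new_map = 0;		/* per-bus u32, ports 0..30 */
	unsigned port;

	for (port = 0; port < 31; port++) {
		old_set(old_map, port);
		new_map |= 1u << port;	/* new scheme: one bit per port */
		assert(old_test(old_map, port) == !!(new_map & (1u << port)));
	}
	printf("both schemes agree for ports 0..30\n");
	return 0;
}
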
@@ -385,12 +493,16 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
- addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
- temp = xhci_readl(xhci, addr);
+ temp = xhci_readl(xhci, port_array[wIndex]);
+ if (temp == 0xffffffff) {
+ retval = -ENODEV;
+ break;
+ }
temp = xhci_port_state_to_neutral(temp);
+ /* FIXME: What new port features do we need to support? */
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- temp = xhci_readl(xhci, addr);
+ temp = xhci_readl(xhci, port_array[wIndex]);
/* Per the spec, software should not attempt to suspend
* a port unless the port reports that it is in the
* enabled (PED = ‘1’, PLS < ‘3’) state.
@@ -402,7 +514,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
- slot_id = xhci_find_slot_id_by_port(xhci, wIndex + 1);
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ wIndex + 1);
if (!slot_id) {
xhci_warn(xhci, "slot_id is zero\n");
goto error;
@@ -415,15 +528,14 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = xhci_port_state_to_neutral(temp);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | XDEV_U3;
- xhci_writel(xhci, temp, addr);
+ xhci_writel(xhci, temp, port_array[wIndex]);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(10); /* wait for the device to enter U3 */
spin_lock_irqsave(&xhci->lock, flags);
- temp = xhci_readl(xhci, addr);
- xhci->suspended_ports[wIndex >> 5] |=
- 1 << (wIndex & (31));
+ temp = xhci_readl(xhci, port_array[wIndex]);
+ bus_state->suspended_ports |= 1 << wIndex;
break;
case USB_PORT_FEAT_POWER:
/*
@@ -432,34 +544,39 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* However, khubd will ignore the roothub events until
* the roothub is registered.
*/
- xhci_writel(xhci, temp | PORT_POWER, addr);
+ xhci_writel(xhci, temp | PORT_POWER,
+ port_array[wIndex]);
- temp = xhci_readl(xhci, addr);
+ temp = xhci_readl(xhci, port_array[wIndex]);
xhci_dbg(xhci, "set port power, actual port %d status = 0x%x\n", wIndex, temp);
break;
case USB_PORT_FEAT_RESET:
temp = (temp | PORT_RESET);
- xhci_writel(xhci, temp, addr);
+ xhci_writel(xhci, temp, port_array[wIndex]);
- temp = xhci_readl(xhci, addr);
+ temp = xhci_readl(xhci, port_array[wIndex]);
xhci_dbg(xhci, "set port reset, actual port %d status = 0x%x\n", wIndex, temp);
break;
default:
goto error;
}
- temp = xhci_readl(xhci, addr); /* unblock any posted writes */
+ /* unblock any posted writes */
+ temp = xhci_readl(xhci, port_array[wIndex]);
break;
case ClearPortFeature:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
- addr = &xhci->op_regs->port_status_base +
- NUM_PORT_REGS*(wIndex & 0xff);
- temp = xhci_readl(xhci, addr);
+ temp = xhci_readl(xhci, port_array[wIndex]);
+ if (temp == 0xffffffff) {
+ retval = -ENODEV;
+ break;
+ }
+ /* FIXME: What new port features do we need to support? */
temp = xhci_port_state_to_neutral(temp);
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- temp = xhci_readl(xhci, addr);
+ temp = xhci_readl(xhci, port_array[wIndex]);
xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n");
xhci_dbg(xhci, "PORTSC %04x\n", temp);
if (temp & PORT_RESET)
@@ -471,30 +588,34 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = xhci_port_state_to_neutral(temp);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp, addr);
- xhci_readl(xhci, addr);
+ xhci_writel(xhci, temp,
+ port_array[wIndex]);
+ xhci_readl(xhci, port_array[wIndex]);
} else {
temp = xhci_port_state_to_neutral(temp);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | XDEV_RESUME;
- xhci_writel(xhci, temp, addr);
+ xhci_writel(xhci, temp,
+ port_array[wIndex]);
spin_unlock_irqrestore(&xhci->lock,
flags);
msleep(20);
spin_lock_irqsave(&xhci->lock, flags);
- temp = xhci_readl(xhci, addr);
+ temp = xhci_readl(xhci,
+ port_array[wIndex]);
temp = xhci_port_state_to_neutral(temp);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp, addr);
+ xhci_writel(xhci, temp,
+ port_array[wIndex]);
}
- xhci->port_c_suspend[wIndex >> 5] |=
- 1 << (wIndex & 31);
+ bus_state->port_c_suspend |= 1 << wIndex;
}
- slot_id = xhci_find_slot_id_by_port(xhci, wIndex + 1);
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ wIndex + 1);
if (!slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
goto error;
@@ -502,17 +623,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_ring_device(xhci, slot_id);
break;
case USB_PORT_FEAT_C_SUSPEND:
- xhci->port_c_suspend[wIndex >> 5] &=
- ~(1 << (wIndex & 31));
+ bus_state->port_c_suspend &= ~(1 << wIndex);
case USB_PORT_FEAT_C_RESET:
case USB_PORT_FEAT_C_CONNECTION:
case USB_PORT_FEAT_C_OVER_CURRENT:
case USB_PORT_FEAT_C_ENABLE:
xhci_clear_port_change_bit(xhci, wValue, wIndex,
- addr, temp);
+ port_array[wIndex], temp);
break;
case USB_PORT_FEAT_ENABLE:
- xhci_disable_port(xhci, wIndex, addr, temp);
+ xhci_disable_port(hcd, xhci, wIndex,
+ port_array[wIndex], temp);
break;
default:
goto error;
@@ -543,9 +664,17 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
int i, retval;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ports;
- u32 __iomem *addr;
-
- ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ u32 __iomem **port_array;
+ struct xhci_bus_state *bus_state;
+
+ if (hcd->speed == HCD_USB3) {
+ ports = xhci->num_usb3_ports;
+ port_array = xhci->usb3_ports;
+ } else {
+ ports = xhci->num_usb2_ports;
+ port_array = xhci->usb2_ports;
+ }
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
/* Initial status is no changes */
retval = (ports + 8) / 8;
@@ -557,13 +686,15 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
spin_lock_irqsave(&xhci->lock, flags);
/* For each port, did anything change? If so, set that bit in buf. */
for (i = 0; i < ports; i++) {
- addr = &xhci->op_regs->port_status_base +
- NUM_PORT_REGS*i;
- temp = xhci_readl(xhci, addr);
+ temp = xhci_readl(xhci, port_array[i]);
+ if (temp == 0xffffffff) {
+ retval = -ENODEV;
+ break;
+ }
if ((temp & mask) != 0 ||
- (xhci->port_c_suspend[i >> 5] & 1 << (i & 31)) ||
- (xhci->resume_done[i] && time_after_eq(
- jiffies, xhci->resume_done[i]))) {
+ (bus_state->port_c_suspend & 1 << i) ||
+ (bus_state->resume_done[i] && time_after_eq(
+ jiffies, bus_state->resume_done[i]))) {
buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
status = 1;
}
@@ -577,42 +708,51 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
int xhci_bus_suspend(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- int port;
+ int max_ports, port_index;
+ u32 __iomem **port_array;
+ struct xhci_bus_state *bus_state;
unsigned long flags;
- xhci_dbg(xhci, "suspend root hub\n");
+ if (hcd->speed == HCD_USB3) {
+ max_ports = xhci->num_usb3_ports;
+ port_array = xhci->usb3_ports;
+ xhci_dbg(xhci, "suspend USB 3.0 root hub\n");
+ } else {
+ max_ports = xhci->num_usb2_ports;
+ port_array = xhci->usb2_ports;
+ xhci_dbg(xhci, "suspend USB 2.0 root hub\n");
+ }
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
spin_lock_irqsave(&xhci->lock, flags);
if (hcd->self.root_hub->do_remote_wakeup) {
- port = HCS_MAX_PORTS(xhci->hcs_params1);
- while (port--) {
- if (xhci->resume_done[port] != 0) {
+ port_index = max_ports;
+ while (port_index--) {
+ if (bus_state->resume_done[port_index] != 0) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "suspend failed because "
"port %d is resuming\n",
- port + 1);
+ port_index + 1);
return -EBUSY;
}
}
}
- port = HCS_MAX_PORTS(xhci->hcs_params1);
- xhci->bus_suspended = 0;
- while (port--) {
+ port_index = max_ports;
+ bus_state->bus_suspended = 0;
+ while (port_index--) {
/* suspend the port if the port is not suspended */
- u32 __iomem *addr;
u32 t1, t2;
int slot_id;
- addr = &xhci->op_regs->port_status_base +
- NUM_PORT_REGS * (port & 0xff);
- t1 = xhci_readl(xhci, addr);
+ t1 = xhci_readl(xhci, port_array[port_index]);
t2 = xhci_port_state_to_neutral(t1);
if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
- xhci_dbg(xhci, "port %d not suspended\n", port);
- slot_id = xhci_find_slot_id_by_port(xhci, port + 1);
+ xhci_dbg(xhci, "port %d not suspended\n", port_index);
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ port_index + 1);
if (slot_id) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_stop_device(xhci, slot_id, 1);
@@ -620,7 +760,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
}
t2 &= ~PORT_PLS_MASK;
t2 |= PORT_LINK_STROBE | XDEV_U3;
- set_bit(port, &xhci->bus_suspended);
+ set_bit(port_index, &bus_state->bus_suspended);
}
if (hcd->self.root_hub->do_remote_wakeup) {
if (t1 & PORT_CONNECT) {
@@ -635,22 +775,24 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
t1 = xhci_port_state_to_neutral(t1);
if (t1 != t2)
- xhci_writel(xhci, t2, addr);
+ xhci_writel(xhci, t2, port_array[port_index]);
if (DEV_HIGHSPEED(t1)) {
/* enable remote wake up for USB 2.0 */
u32 __iomem *addr;
u32 tmp;
- addr = &xhci->op_regs->port_power_base +
- NUM_PORT_REGS * (port & 0xff);
+ /* Add one to the port status register address to get
+ * the port power control register address.
+ */
+ addr = port_array[port_index] + 1;
tmp = xhci_readl(xhci, addr);
tmp |= PORT_RWE;
xhci_writel(xhci, tmp, addr);
}
}
hcd->state = HC_STATE_SUSPENDED;
- xhci->next_statechange = jiffies + msecs_to_jiffies(10);
+ bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
}
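
The "add one to the port status register address" comment above relies on the xHCI port register layout: each port owns four consecutive 32-bit registers (PORTSC, then the port power-management register, then the link info register, then a reserved slot), so PORTSC + 1 in u32 units lands on the power-management register. The sketch below, which is not driver code and uses a plain array as a stand-in for memory-mapped I/O, shows the pointer arithmetic.

/* Standalone sketch of the pointer arithmetic: a fake three-port register
 * file, four u32 registers per port.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_PORT_REGS	4	/* 32-bit registers per port register set */

int main(void)
{
	uint32_t regs[3 * NUM_PORT_REGS] = { 0 };
	uint32_t *portsc, *portpmsc;
	unsigned port = 1;

	/* Old style: index each register set from a per-register base. */
	portsc = &regs[0] + NUM_PORT_REGS * port;

	/* New style: keep a per-port PORTSC pointer and add 1 for the
	 * power-management register.
	 */
	portpmsc = portsc + 1;

	printf("port %u: PORTSC offset %td, power reg offset %td (in u32s)\n",
	       port, portsc - regs, portpmsc - regs);
	return 0;
}
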
@@ -658,13 +800,24 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
int xhci_bus_resume(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- int port;
+ int max_ports, port_index;
+ u32 __iomem **port_array;
+ struct xhci_bus_state *bus_state;
u32 temp;
unsigned long flags;
- xhci_dbg(xhci, "resume root hub\n");
+ if (hcd->speed == HCD_USB3) {
+ max_ports = xhci->num_usb3_ports;
+ port_array = xhci->usb3_ports;
+ xhci_dbg(xhci, "resume USB 3.0 root hub\n");
+ } else {
+ max_ports = xhci->num_usb2_ports;
+ port_array = xhci->usb2_ports;
+ xhci_dbg(xhci, "resume USB 2.0 root hub\n");
+ }
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
- if (time_before(jiffies, xhci->next_statechange))
+ if (time_before(jiffies, bus_state->next_statechange))
msleep(5);
spin_lock_irqsave(&xhci->lock, flags);
@@ -678,57 +831,57 @@ int xhci_bus_resume(struct usb_hcd *hcd)
temp &= ~CMD_EIE;
xhci_writel(xhci, temp, &xhci->op_regs->command);
- port = HCS_MAX_PORTS(xhci->hcs_params1);
- while (port--) {
+ port_index = max_ports;
+ while (port_index--) {
/* Check whether the port needs to be resumed. If so,
resume the port and disable remote wakeup */
- u32 __iomem *addr;
u32 temp;
int slot_id;
- addr = &xhci->op_regs->port_status_base +
- NUM_PORT_REGS * (port & 0xff);
- temp = xhci_readl(xhci, addr);
+ temp = xhci_readl(xhci, port_array[port_index]);
if (DEV_SUPERSPEED(temp))
temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
else
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
- if (test_bit(port, &xhci->bus_suspended) &&
+ if (test_bit(port_index, &bus_state->bus_suspended) &&
(temp & PORT_PLS_MASK)) {
if (DEV_SUPERSPEED(temp)) {
temp = xhci_port_state_to_neutral(temp);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp, addr);
+ xhci_writel(xhci, temp, port_array[port_index]);
} else {
temp = xhci_port_state_to_neutral(temp);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | XDEV_RESUME;
- xhci_writel(xhci, temp, addr);
+ xhci_writel(xhci, temp, port_array[port_index]);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(20);
spin_lock_irqsave(&xhci->lock, flags);
- temp = xhci_readl(xhci, addr);
+ temp = xhci_readl(xhci, port_array[port_index]);
temp = xhci_port_state_to_neutral(temp);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp, addr);
+ xhci_writel(xhci, temp, port_array[port_index]);
}
- slot_id = xhci_find_slot_id_by_port(xhci, port + 1);
+ slot_id = xhci_find_slot_id_by_port(hcd,
+ xhci, port_index + 1);
if (slot_id)
xhci_ring_device(xhci, slot_id);
} else
- xhci_writel(xhci, temp, addr);
+ xhci_writel(xhci, temp, port_array[port_index]);
if (DEV_HIGHSPEED(temp)) {
/* disable remote wake up for USB 2.0 */
u32 __iomem *addr;
u32 tmp;
- addr = &xhci->op_regs->port_power_base +
- NUM_PORT_REGS * (port & 0xff);
+ /* Add one to the port status register address to get
+ * the port power control register address.
+ */
+ addr = port_array[port_index] + 1;
tmp = xhci_readl(xhci, addr);
tmp &= ~PORT_RWE;
xhci_writel(xhci, tmp, addr);
@@ -737,8 +890,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
(void) xhci_readl(xhci, &xhci->op_regs->command);
- xhci->next_statechange = jiffies + msecs_to_jiffies(5);
- hcd->state = HC_STATE_RUNNING;
+ bus_state->next_statechange = jiffies + msecs_to_jiffies(5);
/* re-enable irqs */
temp = xhci_readl(xhci, &xhci->op_regs->command);
temp |= CMD_EIE;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 0de31000532..d188746b362 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -814,14 +814,64 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
ep0_ctx->deq |= ep_ring->cycle_state;
}
+/*
+ * The xHCI roothub may have ports of differing speeds in any order in the port
+ * status registers. xhci->port_array provides an array of the port speed for
+ * each offset into the port status registers.
+ *
+ * The xHCI hardware wants to know the roothub port number that the USB device
+ * is attached to (or the roothub port its ancestor hub is attached to). All we
+ * know is the index of that port under either the USB 2.0 or the USB 3.0
+ * roothub, but that doesn't give us the real index into the HW port status
+ * registers. Scan through the xHCI roothub port array, looking for the Nth
+ * entry of the correct port speed. Return the port number of that entry.
+ */
+static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
+ struct usb_device *udev)
+{
+ struct usb_device *top_dev;
+ unsigned int num_similar_speed_ports;
+ unsigned int faked_port_num;
+ int i;
+
+ for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
+ top_dev = top_dev->parent)
+ /* Found device below root hub */;
+ faked_port_num = top_dev->portnum;
+ for (i = 0, num_similar_speed_ports = 0;
+ i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
+ u8 port_speed = xhci->port_array[i];
+
+ /*
+ * Skip ports that don't have known speeds, or have duplicate
+ * Extended Capabilities port speed entries.
+ */
+ if (port_speed == 0 || port_speed == -1)
+ continue;
+
+ /*
+ * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
+ * 1.1 ports are under the USB 2.0 hub. If the port speed
+ * matches the device speed, it's a similar speed port.
+ */
+ if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER))
+ num_similar_speed_ports++;
+ if (num_similar_speed_ports == faked_port_num)
+ /* Roothub ports are numbered from 1 to N */
+ return i+1;
+ }
+ return 0;
+}
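
As a side note on the counting algorithm described in the comment block above: the function walks the combined speed array and returns the hardware (1-based) port number of the Nth port whose speed class matches the device. The following self-contained sketch re-implements that idea with a made-up speed array; nth_port_of_speed() and the example values are hypothetical and exist only to illustrate the walk, they are not the driver's data.

#include <stdio.h>

static int nth_port_of_speed(const unsigned char *speeds, int nports,
			     int want_usb3, int n)
{
	int i, seen = 0;

	for (i = 0; i < nports; i++) {
		if (speeds[i] == 0 || speeds[i] == 0xff)
			continue;	/* unknown or duplicate entry */
		if ((speeds[i] == 0x03) == want_usb3)
			seen++;
		if (seen == n)
			return i + 1;	/* hardware ports are 1-based */
	}
	return 0;
}

int main(void)
{
	/* hypothetical roothub: ports 1-2 are USB 2.0, ports 3-4 are USB 3.0 */
	const unsigned char speeds[] = { 0x02, 0x02, 0x03, 0x03 };

	printf("2nd USB 2.0 port is hardware port %d\n",
	       nth_port_of_speed(speeds, 4, 0, 2));	/* prints 2 */
	printf("1st USB 3.0 port is hardware port %d\n",
	       nth_port_of_speed(speeds, 4, 1, 1));	/* prints 3 */
	return 0;
}
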
+
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep0_ctx;
- struct usb_device *top_dev;
struct xhci_slot_ctx *slot_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
+ u32 port_num;
+ struct usb_device *top_dev;
dev = xhci->devs[udev->slot_id];
/* Slot ID 0 is reserved */
@@ -863,16 +913,20 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
BUG();
}
/* Find the root hub port this device is under */
+ port_num = xhci_find_real_port_number(xhci, udev);
+ if (!port_num)
+ return -EINVAL;
+ slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(port_num);
+ /* Set the port number in the virtual_device to the faked port number */
for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
top_dev = top_dev->parent)
/* Found device below root hub */;
- slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
dev->port = top_dev->portnum;
- xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
+ xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
+ xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->port);
- /* Is this a LS/FS device under a HS hub? */
- if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
- udev->tt) {
+ /* Is this a LS/FS device under an external HS hub? */
+ if (udev->tt && udev->tt->hub->parent) {
slot_ctx->tt_info = udev->tt->hub->slot_id;
slot_ctx->tt_info |= udev->ttport << 8;
if (udev->tt->multi)
@@ -1486,7 +1540,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci->page_size = 0;
xhci->page_shift = 0;
- xhci->bus_suspended = 0;
+ xhci->bus_state[0].bus_suspended = 0;
+ xhci->bus_state[1].bus_suspended = 0;
}
static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
@@ -1782,6 +1837,20 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
}
xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
xhci->num_usb2_ports, xhci->num_usb3_ports);
+
+ /* Place limits on the number of roothub ports so that the hub
+ * descriptors aren't longer than the USB core will allocate.
+ */
+ if (xhci->num_usb3_ports > 15) {
+ xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
+ xhci->num_usb3_ports = 15;
+ }
+ if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
+ xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
+ USB_MAXCHILDREN);
+ xhci->num_usb2_ports = USB_MAXCHILDREN;
+ }
+
/*
* Note we could have all USB 3.0 ports, or all USB 2.0 ports.
* Not sure how the USB core will handle a hub with no ports...
@@ -1806,6 +1875,8 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
"addr = %p\n", i,
xhci->usb2_ports[port_index]);
port_index++;
+ if (port_index == xhci->num_usb2_ports)
+ break;
}
}
if (xhci->num_usb3_ports) {
@@ -1824,6 +1895,8 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
"addr = %p\n", i,
xhci->usb3_ports[port_index]);
port_index++;
+ if (port_index == xhci->num_usb3_ports)
+ break;
}
}
return 0;
@@ -2005,8 +2078,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
init_completion(&xhci->addr_dev);
for (i = 0; i < MAX_HC_SLOTS; ++i)
xhci->devs[i] = NULL;
- for (i = 0; i < MAX_HC_PORTS; ++i)
- xhci->resume_done[i] = 0;
+ for (i = 0; i < USB_MAXCHILDREN; ++i) {
+ xhci->bus_state[0].resume_done[i] = 0;
+ xhci->bus_state[1].resume_done[i] = 0;
+ }
if (scratchpad_alloc(xhci, flags))
goto fail;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index bb668a894ab..ceea9f33491 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -50,13 +50,45 @@ static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
/* called during probe() after chip reset completes */
static int xhci_pci_setup(struct usb_hcd *hcd)
{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_hcd *xhci;
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval;
u32 temp;
hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
+ if (usb_hcd_is_primary_hcd(hcd)) {
+ xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
+ if (!xhci)
+ return -ENOMEM;
+ *((struct xhci_hcd **) hcd->hcd_priv) = xhci;
+ xhci->main_hcd = hcd;
+ /* Mark the first roothub as being USB 2.0.
+ * The xHCI driver will register the USB 3.0 roothub.
+ */
+ hcd->speed = HCD_USB2;
+ hcd->self.root_hub->speed = USB_SPEED_HIGH;
+ /*
+ * USB 2.0 roothub under xHCI has an integrated TT,
+ * (rate matching hub) as opposed to having an OHCI/UHCI
+ * companion controller.
+ */
+ hcd->has_tt = 1;
+ } else {
+ /* xHCI private pointer was set in xhci_pci_probe for the second
+ * registered roothub.
+ */
+ xhci = hcd_to_xhci(hcd);
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ if (HCC_64BIT_ADDR(temp)) {
+ xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
+ dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
+ } else {
+ dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
+ }
+ return 0;
+ }
+
xhci->cap_regs = hcd->regs;
xhci->op_regs = hcd->regs +
HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
@@ -85,13 +117,13 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
- return retval;
+ goto error;
xhci_dbg(xhci, "Resetting HCD\n");
/* Reset the internal HC memory state and registers. */
retval = xhci_reset(xhci);
if (retval)
- return retval;
+ goto error;
xhci_dbg(xhci, "Reset complete\n");
temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
@@ -106,14 +138,85 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
/* Initialize HCD and host controller data structures. */
retval = xhci_init(hcd);
if (retval)
- return retval;
+ goto error;
xhci_dbg(xhci, "Called HCD init\n");
pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
/* Find any debug ports */
- return xhci_pci_reinit(xhci, pdev);
+ retval = xhci_pci_reinit(xhci, pdev);
+ if (!retval)
+ return retval;
+
+error:
+ kfree(xhci);
+ return retval;
+}
+
+/*
+ * We need to register our own PCI probe function (instead of the USB core's
+ * function) in order to create a second roothub under xHCI.
+ */
+static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int retval;
+ struct xhci_hcd *xhci;
+ struct hc_driver *driver;
+ struct usb_hcd *hcd;
+
+ driver = (struct hc_driver *)id->driver_data;
+ /* Register the USB 2.0 roothub.
+ * FIXME: USB core must know to register the USB 2.0 roothub first.
+ * This is sort of silly, because we could just set the HCD driver flags
+ * to say USB 2.0, but I'm not sure what the implications would be in
+ * the other parts of the HCD code.
+ */
+ retval = usb_hcd_pci_probe(dev, id);
+
+ if (retval)
+ return retval;
+
+ /* USB 2.0 roothub is stored in the PCI device now. */
+ hcd = dev_get_drvdata(&dev->dev);
+ xhci = hcd_to_xhci(hcd);
+ xhci->shared_hcd = usb_create_shared_hcd(driver, &dev->dev,
+ pci_name(dev), hcd);
+ if (!xhci->shared_hcd) {
+ retval = -ENOMEM;
+ goto dealloc_usb2_hcd;
+ }
+
+ /* Set the xHCI pointer before xhci_pci_setup() (aka hcd_driver.reset)
+ * is called by usb_add_hcd().
+ */
+ *((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;
+
+ retval = usb_add_hcd(xhci->shared_hcd, dev->irq,
+ IRQF_DISABLED | IRQF_SHARED);
+ if (retval)
+ goto put_usb3_hcd;
+ /* Roothub already marked as USB 3.0 speed */
+ return 0;
+
+put_usb3_hcd:
+ usb_put_hcd(xhci->shared_hcd);
+dealloc_usb2_hcd:
+ usb_hcd_pci_remove(dev);
+ return retval;
+}
+
+static void xhci_pci_remove(struct pci_dev *dev)
+{
+ struct xhci_hcd *xhci;
+
+ xhci = hcd_to_xhci(pci_get_drvdata(dev));
+ if (xhci->shared_hcd) {
+ usb_remove_hcd(xhci->shared_hcd);
+ usb_put_hcd(xhci->shared_hcd);
+ }
+ usb_hcd_pci_remove(dev);
+ kfree(xhci);
}
#ifdef CONFIG_PM
@@ -122,7 +225,8 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int retval = 0;
- if (hcd->state != HC_STATE_SUSPENDED)
+ if (hcd->state != HC_STATE_SUSPENDED ||
+ xhci->shared_hcd->state != HC_STATE_SUSPENDED)
return -EINVAL;
retval = xhci_suspend(xhci);
@@ -143,13 +247,13 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
static const struct hc_driver xhci_pci_hc_driver = {
.description = hcd_name,
.product_desc = "xHCI Host Controller",
- .hcd_priv_size = sizeof(struct xhci_hcd),
+ .hcd_priv_size = sizeof(struct xhci_hcd *),
/*
* generic hardware linkage
*/
.irq = xhci_irq,
- .flags = HCD_MEMORY | HCD_USB3,
+ .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,
/*
* basic lifecycle operations
@@ -210,8 +314,8 @@ static struct pci_driver xhci_pci_driver = {
.name = (char *) hcd_name,
.id_table = pci_ids,
- .probe = usb_hcd_pci_probe,
- .remove = usb_hcd_pci_remove,
+ .probe = xhci_pci_probe,
+ .remove = xhci_pci_remove,
/* suspend and resume implemented later */
.shutdown = usb_hcd_pci_shutdown,
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d3f0406f6f2..cfc1ad92473 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -380,10 +380,8 @@ static struct xhci_segment *find_trb_seg(
while (cur_seg->trbs > trb ||
&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
- if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
- TRB_TYPE(TRB_LINK) &&
- (generic_trb->field[3] & LINK_TOGGLE))
- *cycle_state = ~(*cycle_state) & 0x1;
+ if (generic_trb->field[3] & LINK_TOGGLE)
+ *cycle_state ^= 0x1;
cur_seg = cur_seg->next;
if (cur_seg == start_seg)
/* Looped over the entire list. Oops! */
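
The cycle-state change in this hunk swaps "~x & 0x1" for "x ^= 0x1"; for a value constrained to 0 or 1 the two forms toggle identically. A quick standalone check, illustration only:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned x;

	for (x = 0; x <= 1; x++) {
		unsigned old_form = ~x & 0x1;
		unsigned new_form = x ^ 0x1;

		assert(old_form == new_form);
		printf("cycle_state %u toggles to %u\n", x, new_form);
	}
	return 0;
}
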
@@ -497,7 +495,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
trb = &state->new_deq_ptr->generic;
if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
(trb->field[3] & LINK_TOGGLE))
- state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
+ state->new_cycle_state ^= 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
/*
@@ -610,13 +608,14 @@ static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
struct xhci_td *cur_td, int status, char *adjective)
{
- struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct usb_hcd *hcd;
struct urb *urb;
struct urb_priv *urb_priv;
urb = cur_td->urb;
urb_priv = urb->hcpriv;
urb_priv->td_cnt++;
+ hcd = bus_to_hcd(urb->dev->bus);
/* Only giveback urb when this is the last td in urb */
if (urb_priv->td_cnt == urb_priv->length) {
@@ -835,8 +834,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
if (ret < 0) {
/* This is bad; the host is not responding to commands and it's
* not allowing itself to be halted. At least interrupts are
- * disabled, so we can set HC_STATE_HALT and notify the
- * USB core. But if we call usb_hc_died(), it will attempt to
+ * disabled. If we call usb_hc_died(), it will attempt to
* disconnect all device drivers under this host. Those
* disconnect() methods will wait for all URBs to be unlinked,
* so we must complete them.
@@ -881,9 +879,8 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
}
}
spin_unlock(&xhci->lock);
- xhci_to_hcd(xhci)->state = HC_STATE_HALT;
xhci_dbg(xhci, "Calling usb_hc_died()\n");
- usb_hc_died(xhci_to_hcd(xhci));
+ usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
xhci_dbg(xhci, "xHCI host controller is dead.\n");
}
@@ -1146,7 +1143,6 @@ bandwidth_change:
handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
break;
case TRB_TYPE(TRB_CMD_NOOP):
- ++xhci->noops_handled;
break;
case TRB_TYPE(TRB_RESET_EP):
handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
@@ -1190,15 +1186,55 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
handle_cmd_completion(xhci, &event->event_cmd);
}
+/* @port_id: the one-based port ID from the hardware (indexed from array of all
+ * port registers -- USB 3.0 and USB 2.0).
+ *
+ * Returns a zero-based port number, which is suitable for indexing into each of
+ * the split roothubs' port arrays and bus state arrays.
+ */
+static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
+ struct xhci_hcd *xhci, u32 port_id)
+{
+ unsigned int i;
+ unsigned int num_similar_speed_ports = 0;
+
+ /* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
+ * and usb2_ports are 0-based indexes. Count the number of similar
+ * speed ports, up to 1 port before this port.
+ */
+ for (i = 0; i < (port_id - 1); i++) {
+ u8 port_speed = xhci->port_array[i];
+
+ /*
+ * Skip ports that don't have known speeds, or have duplicate
+ * Extended Capabilities port speed entries.
+ */
+ if (port_speed == 0 || port_speed == -1)
+ continue;
+
+ /*
+ * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
+ * 1.1 ports are under the USB 2.0 hub. If the port speed
+ * matches the device speed, it's a similar speed port.
+ */
+ if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
+ num_similar_speed_ports++;
+ }
+ return num_similar_speed_ports;
+}
+
static void handle_port_status(struct xhci_hcd *xhci,
union xhci_trb *event)
{
- struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct usb_hcd *hcd;
u32 port_id;
u32 temp, temp1;
- u32 __iomem *addr;
- int ports;
+ int max_ports;
int slot_id;
+ unsigned int faked_port_index;
+ u8 major_revision;
+ struct xhci_bus_state *bus_state;
+ u32 __iomem **port_array;
/* Port status change events always have a successful completion code */
if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
@@ -1208,14 +1244,50 @@ static void handle_port_status(struct xhci_hcd *xhci,
port_id = GET_PORT_ID(event->generic.field[0]);
xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
- ports = HCS_MAX_PORTS(xhci->hcs_params1);
- if ((port_id <= 0) || (port_id > ports)) {
+ max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ if ((port_id <= 0) || (port_id > max_ports)) {
xhci_warn(xhci, "Invalid port id %d\n", port_id);
goto cleanup;
}
- addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
- temp = xhci_readl(xhci, addr);
+ /* Figure out which usb_hcd this port is attached to:
+ * is it a USB 3.0 port or a USB 2.0/1.1 port?
+ */
+ major_revision = xhci->port_array[port_id - 1];
+ if (major_revision == 0) {
+ xhci_warn(xhci, "Event for port %u not in "
+ "Extended Capabilities, ignoring.\n",
+ port_id);
+ goto cleanup;
+ }
+ if (major_revision == (u8) -1) {
+ xhci_warn(xhci, "Event for port %u duplicated in"
+ "Extended Capabilities, ignoring.\n",
+ port_id);
+ goto cleanup;
+ }
+
+ /*
+ * Hardware port IDs reported by a Port Status Change Event include USB
+ * 3.0 and USB 2.0 ports. We want to check if the port has reported a
+ * resume event, but we first need to translate the hardware port ID
+ * into the index into the ports on the correct split roothub, and the
+ * correct bus_state structure.
+ */
+ /* Find the right roothub. */
+ hcd = xhci_to_hcd(xhci);
+ if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
+ hcd = xhci->shared_hcd;
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
+ if (hcd->speed == HCD_USB3)
+ port_array = xhci->usb3_ports;
+ else
+ port_array = xhci->usb2_ports;
+ /* Find the faked port hub number */
+ faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
+ port_id);
+
+ temp = xhci_readl(xhci, port_array[faked_port_index]);
if (hcd->state == HC_STATE_SUSPENDED) {
xhci_dbg(xhci, "resume root hub\n");
usb_hcd_resume_root_hub(hcd);
@@ -1235,8 +1307,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
temp = xhci_port_state_to_neutral(temp);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp, addr);
- slot_id = xhci_find_slot_id_by_port(xhci, port_id);
+ xhci_writel(xhci, temp, port_array[faked_port_index]);
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ faked_port_index);
if (!slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
goto cleanup;
@@ -1244,16 +1317,16 @@ static void handle_port_status(struct xhci_hcd *xhci,
xhci_ring_device(xhci, slot_id);
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
/* Clear PORT_PLC */
- temp = xhci_readl(xhci, addr);
+ temp = xhci_readl(xhci, port_array[faked_port_index]);
temp = xhci_port_state_to_neutral(temp);
temp |= PORT_PLC;
- xhci_writel(xhci, temp, addr);
+ xhci_writel(xhci, temp, port_array[faked_port_index]);
} else {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
- xhci->resume_done[port_id - 1] = jiffies +
+ bus_state->resume_done[faked_port_index] = jiffies +
msecs_to_jiffies(20);
mod_timer(&hcd->rh_timer,
- xhci->resume_done[port_id - 1]);
+ bus_state->resume_done[faked_port_index]);
/* Do the rest in GetPortStatus */
}
}
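
The resume path above records an absolute deadline (jiffies + 20 ms) in bus_state->resume_done and lets GetPortStatus finish the resume once that deadline has passed. The tiny sketch below shows the "record a deadline, poll it later" pattern with plain integers standing in for jiffies; the names are invented and the comparison helper only mirrors the wrap-safe idea behind time_after_eq().

#include <stdio.h>

static int time_after_eq_demo(unsigned long a, unsigned long b)
{
	return (long)(a - b) >= 0;	/* wrap-safe "a is at or past b" */
}

int main(void)
{
	unsigned long now = 1000;		/* pretend current tick count */
	unsigned long resume_done = now + 20;	/* deadline set at resume start */

	printf("at %lu: resume finished? %s\n", now,
	       resume_done && time_after_eq_demo(now, resume_done) ? "yes" : "no");
	now += 25;				/* time passes */
	printf("at %lu: resume finished? %s\n", now,
	       resume_done && time_after_eq_demo(now, resume_done) ? "yes" : "no");
	return 0;
}
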
@@ -1264,7 +1337,7 @@ cleanup:
spin_unlock(&xhci->lock);
/* Pass this up to the core */
- usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
+ usb_hcd_poll_rh_status(hcd);
spin_lock(&xhci->lock);
}
@@ -2018,12 +2091,12 @@ cleanup:
trb_comp_code != COMP_BABBLE))
xhci_urb_free_priv(xhci, urb_priv);
- usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
+ usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
xhci_dbg(xhci, "Giveback URB %p, len = %d, "
"status = %d\n",
urb, urb->actual_length, status);
spin_unlock(&xhci->lock);
- usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
+ usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
spin_lock(&xhci->lock);
}
@@ -2147,7 +2220,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
hw_died:
- xhci_to_hcd(xhci)->state = HC_STATE_HALT;
spin_unlock(&xhci->lock);
return -ESHUTDOWN;
}
@@ -2215,8 +2287,12 @@ hw_died:
irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
{
irqreturn_t ret;
+ struct xhci_hcd *xhci;
+ xhci = hcd_to_xhci(hcd);
set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+ if (xhci->shared_hcd)
+ set_bit(HCD_FLAG_SAW_IRQ, &xhci->shared_hcd->flags);
ret = xhci_irq(hcd);
@@ -2360,7 +2436,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
INIT_LIST_HEAD(&td->cancelled_td_list);
if (td_index == 0) {
- ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
+ ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
if (unlikely(ret)) {
xhci_urb_free_priv(xhci, urb_priv);
urb->hcpriv = NULL;
@@ -3159,24 +3235,6 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
return 0;
}
-/* Queue a no-op command on the command ring */
-static int queue_cmd_noop(struct xhci_hcd *xhci)
-{
- return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
-}
-
-/*
- * Place a no-op command on the command ring to test the command and
- * event ring.
- */
-void *xhci_setup_one_noop(struct xhci_hcd *xhci)
-{
- if (queue_cmd_noop(xhci) < 0)
- return NULL;
- xhci->noops_submitted++;
- return xhci_ring_cmd_db;
-}
-
/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 150349d1fda..e3e13836e71 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -93,17 +93,20 @@ void xhci_quiesce(struct xhci_hcd *xhci)
*
* Disable any IRQs and clear the run/stop bit.
* HC will complete any current and actively pipelined transactions, and
- * should halt within 16 microframes of the run/stop bit being cleared.
+ * should halt within 16 ms of the run/stop bit being cleared.
* Read HC Halted bit in the status register to see when the HC is finished.
- * XXX: shouldn't we set HC_STATE_HALT here somewhere?
*/
int xhci_halt(struct xhci_hcd *xhci)
{
+ int ret;
xhci_dbg(xhci, "// Halt the HC\n");
xhci_quiesce(xhci);
- return handshake(xhci, &xhci->op_regs->status,
+ ret = handshake(xhci, &xhci->op_regs->status,
STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
+ if (!ret)
+ xhci->xhc_state |= XHCI_STATE_HALTED;
+ return ret;
}
/*
@@ -130,11 +133,13 @@ static int xhci_start(struct xhci_hcd *xhci)
xhci_err(xhci, "Host took too long to start, "
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
+ if (!ret)
+ xhci->xhc_state &= ~XHCI_STATE_HALTED;
return ret;
}
/*
- * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
+ * Reset a halted HC.
*
* This resets pipelines, timers, counters, state machines, etc.
* Transactions will be terminated immediately, and operational registers
@@ -156,8 +161,6 @@ int xhci_reset(struct xhci_hcd *xhci)
command = xhci_readl(xhci, &xhci->op_regs->command);
command |= CMD_RESET;
xhci_writel(xhci, command, &xhci->op_regs->command);
- /* XXX: Why does EHCI set this here? Shouldn't other code do this? */
- xhci_to_hcd(xhci)->state = HC_STATE_HALT;
ret = handshake(xhci, &xhci->op_regs->command,
CMD_RESET, 0, 250 * 1000);
@@ -350,7 +353,6 @@ static void xhci_event_ring_work(unsigned long arg)
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
- xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
xhci->error_bitmask = 0;
xhci_dbg(xhci, "Event ring:\n");
@@ -370,10 +372,6 @@ static void xhci_event_ring_work(unsigned long arg)
xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
}
}
-
- if (xhci->noops_submitted != NUM_TEST_NOOPS)
- if (xhci_setup_one_noop(xhci))
- xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
if (!xhci->zombie)
@@ -383,6 +381,21 @@ static void xhci_event_ring_work(unsigned long arg)
}
#endif
+static int xhci_run_finished(struct xhci_hcd *xhci)
+{
+ if (xhci_start(xhci)) {
+ xhci_halt(xhci);
+ return -ENODEV;
+ }
+ xhci->shared_hcd->state = HC_STATE_RUNNING;
+
+ if (xhci->quirks & XHCI_NEC_HOST)
+ xhci_ring_cmd_db(xhci);
+
+ xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
+ return 0;
+}
+
/*
* Start the HC after it was halted.
*
@@ -402,9 +415,14 @@ int xhci_run(struct usb_hcd *hcd)
u32 ret;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
- void (*doorbell)(struct xhci_hcd *) = NULL;
+
+ /* Start the xHCI host controller running only after the USB 2.0 roothub
+ * is setup.
+ */
hcd->uses_new_polling = 1;
+ if (!usb_hcd_is_primary_hcd(hcd))
+ return xhci_run_finished(xhci);
xhci_dbg(xhci, "xhci_run\n");
/* unregister the legacy interrupt */
@@ -461,7 +479,6 @@ int xhci_run(struct usb_hcd *hcd)
xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
/* Set the HCD state before we enable the irqs */
- hcd->state = HC_STATE_RUNNING;
temp = xhci_readl(xhci, &xhci->op_regs->command);
temp |= (CMD_EIE);
xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
@@ -475,24 +492,27 @@ int xhci_run(struct usb_hcd *hcd)
&xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, 0);
- if (NUM_TEST_NOOPS > 0)
- doorbell = xhci_setup_one_noop(xhci);
if (xhci->quirks & XHCI_NEC_HOST)
xhci_queue_vendor_command(xhci, 0, 0, 0,
TRB_TYPE(TRB_NEC_GET_FW));
- if (xhci_start(xhci)) {
- xhci_halt(xhci);
- return -ENODEV;
- }
+ xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
+ return 0;
+}
- if (doorbell)
- (*doorbell)(xhci);
- if (xhci->quirks & XHCI_NEC_HOST)
- xhci_ring_cmd_db(xhci);
+static void xhci_only_stop_hcd(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- xhci_dbg(xhci, "Finished xhci_run\n");
- return 0;
+ spin_lock_irq(&xhci->lock);
+ xhci_halt(xhci);
+
+ /* The shared_hcd is going to be deallocated shortly (the USB core only
+ * calls this function when allocation fails in usb_add_hcd(), or
+ * usb_remove_hcd() is called). So we need to unset xHCI's pointer.
+ */
+ xhci->shared_hcd = NULL;
+ spin_unlock_irq(&xhci->lock);
}
/*
@@ -509,7 +529,15 @@ void xhci_stop(struct usb_hcd *hcd)
u32 temp;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ if (!usb_hcd_is_primary_hcd(hcd)) {
+ xhci_only_stop_hcd(xhci->shared_hcd);
+ return;
+ }
+
spin_lock_irq(&xhci->lock);
+ /* Make sure the xHC is halted for a USB3 roothub
+ * (xhci_stop() could be called as part of failed init).
+ */
xhci_halt(xhci);
xhci_reset(xhci);
spin_unlock_irq(&xhci->lock);
@@ -542,6 +570,8 @@ void xhci_stop(struct usb_hcd *hcd)
* This is called when the machine is rebooting or halting. We assume that the
* machine will be powered off, and the HC's internal state will be reset.
* Don't bother to free memory.
+ *
+ * This will only ever be called with the main usb_hcd (the USB3 roothub).
*/
void xhci_shutdown(struct usb_hcd *hcd)
{
@@ -657,6 +687,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
spin_lock_irq(&xhci->lock);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
/* step 1: stop endpoint */
/* skipped assuming that port suspend has done */
@@ -706,10 +737,15 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
- int old_state, retval;
+ struct usb_hcd *secondary_hcd;
+ int retval;
- old_state = hcd->state;
- if (time_before(jiffies, xhci->next_statechange))
+ /* Wait a bit if either of the roothubs needs to settle from the
+ * transition into bus suspend.
+ */
+ if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
+ time_before(jiffies,
+ xhci->bus_state[1].next_statechange))
msleep(100);
spin_lock_irq(&xhci->lock);
@@ -762,16 +798,34 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
xhci_readl(xhci, &xhci->op_regs->status));
- xhci_dbg(xhci, "Initialize the HCD\n");
- retval = xhci_init(hcd);
+ /* USB core calls the PCI reinit and start functions twice:
+ * first with the primary HCD, and then with the secondary HCD.
+ * If we don't do the same, the host will never be started.
+ */
+ if (!usb_hcd_is_primary_hcd(hcd))
+ secondary_hcd = hcd;
+ else
+ secondary_hcd = xhci->shared_hcd;
+
+ xhci_dbg(xhci, "Initialize the xhci_hcd\n");
+ retval = xhci_init(hcd->primary_hcd);
if (retval)
return retval;
+ xhci_dbg(xhci, "Start the primary HCD\n");
+ retval = xhci_run(hcd->primary_hcd);
+ if (retval)
+ goto failed_restart;
- xhci_dbg(xhci, "Start the HCD\n");
- retval = xhci_run(hcd);
- if (!retval)
+ xhci_dbg(xhci, "Start the secondary HCD\n");
+ retval = xhci_run(secondary_hcd);
+ if (!retval) {
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ set_bit(HCD_FLAG_HW_ACCESSIBLE,
+ &xhci->shared_hcd->flags);
+ }
+failed_restart:
hcd->state = HC_STATE_SUSPENDED;
+ xhci->shared_hcd->state = HC_STATE_SUSPENDED;
return retval;
}
@@ -792,10 +846,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
*/
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- if (!hibernated)
- hcd->state = old_state;
- else
- hcd->state = HC_STATE_SUSPENDED;
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
spin_unlock_irq(&xhci->lock);
return 0;
@@ -1167,13 +1218,13 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
if (ret || !urb->hcpriv)
goto done;
temp = xhci_readl(xhci, &xhci->op_regs->status);
- if (temp == 0xffffffff) {
+ if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg(xhci, "HW died, freeing TD.\n");
urb_priv = urb->hcpriv;
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&xhci->lock, flags);
- usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
+ usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
xhci_urb_free_priv(xhci, urb_priv);
return ret;
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 19040c54be0..31ae71448be 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1170,8 +1170,29 @@ struct s3_save {
u64 erst_dequeue;
};
+struct xhci_bus_state {
+ unsigned long bus_suspended;
+ unsigned long next_statechange;
+
+ /* Port suspend arrays are indexed by the portnum of the fake roothub */
+ /* ports suspend status arrays - max 31 ports for USB2, 15 for USB3 */
+ u32 port_c_suspend;
+ u32 suspended_ports;
+ unsigned long resume_done[USB_MAXCHILDREN];
+};
+
+static inline unsigned int hcd_index(struct usb_hcd *hcd)
+{
+ if (hcd->speed == HCD_USB3)
+ return 0;
+ else
+ return 1;
+}
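
hcd_index() above selects one of two bus_state slots (0 for the USB 3.0 roothub, 1 for the USB 2.0 one), so the suspend bookkeeping of the two roothubs stays independent. The sketch below uses simplified stand-in types (demo_bus_state, demo_index) purely to illustrate that split; it is not the driver's structure.

#include <stdio.h>

enum { SPEED_USB2, SPEED_USB3 };

struct demo_bus_state {
	unsigned long bus_suspended;
	unsigned int port_c_suspend;
};

static unsigned int demo_index(int speed)
{
	return speed == SPEED_USB3 ? 0 : 1;
}

int main(void)
{
	struct demo_bus_state bus_state[2] = { { 0, 0 }, { 0, 0 } };

	/* Suspend change on port 4 of the USB 2.0 roothub ... */
	bus_state[demo_index(SPEED_USB2)].port_c_suspend |= 1u << 4;
	/* ... leaves the USB 3.0 roothub's state untouched. */
	printf("usb3 port_c_suspend=0x%x, usb2 port_c_suspend=0x%x\n",
	       bus_state[demo_index(SPEED_USB3)].port_c_suspend,
	       bus_state[demo_index(SPEED_USB2)].port_c_suspend);
	return 0;
}
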
+
/* There is one ehci_hci structure per controller */
struct xhci_hcd {
+ struct usb_hcd *main_hcd;
+ struct usb_hcd *shared_hcd;
/* glue to PCI and HCD framework */
struct xhci_cap_regs __iomem *cap_regs;
struct xhci_op_regs __iomem *op_regs;
@@ -1233,9 +1254,6 @@ struct xhci_hcd {
/* Host controller watchdog timer structures */
unsigned int xhc_state;
- unsigned long bus_suspended;
- unsigned long next_statechange;
-
u32 command;
struct s3_save s3;
/* Host controller is dying - not responding to commands. "I'm not dead yet!"
@@ -1251,18 +1269,15 @@ struct xhci_hcd {
* There are no reports of xHCI host controllers that display this issue.
*/
#define XHCI_STATE_DYING (1 << 0)
+#define XHCI_STATE_HALTED (1 << 1)
/* Statistics */
- int noops_submitted;
- int noops_handled;
int error_bitmask;
unsigned int quirks;
#define XHCI_LINK_TRB_QUIRK (1 << 0)
#define XHCI_RESET_EP_QUIRK (1 << 1)
#define XHCI_NEC_HOST (1 << 2)
- u32 port_c_suspend[8]; /* port suspend change*/
- u32 suspended_ports[8]; /* which ports are
- suspended */
- unsigned long resume_done[MAX_HC_PORTS];
+ /* There are two roothubs to keep track of bus suspend info for */
+ struct xhci_bus_state bus_state[2];
/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
u8 *port_array;
/* Array of pointers to USB 3.0 PORTSC registers */
@@ -1273,18 +1288,15 @@ struct xhci_hcd {
unsigned int num_usb2_ports;
};
-/* For testing purposes */
-#define NUM_TEST_NOOPS 0
-
/* convert between an HCD pointer and the corresponding EHCI_HCD */
static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
{
- return (struct xhci_hcd *) (hcd->hcd_priv);
+ return *((struct xhci_hcd **) (hcd->hcd_priv));
}
static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
{
- return container_of((void *) xhci, struct usb_hcd, hcd_priv);
+ return xhci->main_hcd;
}
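
With hcd_priv_size shrunk to sizeof(struct xhci_hcd *), the HCD private area now holds only a pointer, so both roothub HCDs can resolve to one shared driver structure. The userspace sketch below shows that pointer-in-private-area pattern; struct demo_hcd, struct demo_xhci and hcd_to_demo_xhci() are invented stand-ins, not the real types.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_xhci {
	int halted;
};

struct demo_hcd {
	/* stands in for hcd->hcd_priv, sized to hold just a pointer */
	unsigned char priv[sizeof(struct demo_xhci *)];
};

static struct demo_xhci *hcd_to_demo_xhci(struct demo_hcd *hcd)
{
	struct demo_xhci *xhci;

	memcpy(&xhci, hcd->priv, sizeof(xhci));
	return xhci;
}

int main(void)
{
	struct demo_hcd usb2_hcd, usb3_hcd;
	struct demo_xhci *xhci = calloc(1, sizeof(*xhci));

	if (!xhci)
		return 1;
	/* Both wrappers point at the same shared state. */
	memcpy(usb2_hcd.priv, &xhci, sizeof(xhci));
	memcpy(usb3_hcd.priv, &xhci, sizeof(xhci));

	hcd_to_demo_xhci(&usb2_hcd)->halted = 1;
	printf("seen through the other hcd: halted=%d\n",
	       hcd_to_demo_xhci(&usb3_hcd)->halted);	/* prints 1 */
	free(xhci);
	return 0;
}
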
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
@@ -1480,7 +1492,6 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
dma_addr_t suspect_dma);
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
-void *xhci_setup_one_noop(struct xhci_hcd *xhci);
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id);
@@ -1534,7 +1545,8 @@ int xhci_bus_resume(struct usb_hcd *hcd);
#endif /* CONFIG_PM */
u32 xhci_port_state_to_neutral(u32 state);
-int xhci_find_slot_id_by_port(struct xhci_hcd *xhci, u16 port);
+int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ u16 port);
void xhci_ring_device(struct xhci_hcd *xhci, int slot_id);
/* xHCI contexts */
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index a35b427c0ba..388cc128072 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -83,6 +83,8 @@ static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
#define WARNING(tdev, fmt, args...) \
dev_warn(&(tdev)->intf->dev , fmt , ## args)
+#define GUARD_BYTE 0xA5
+
/*-------------------------------------------------------------------------*/
static int
@@ -186,11 +188,12 @@ static void simple_callback(struct urb *urb)
complete(urb->context);
}
-static struct urb *simple_alloc_urb(
+static struct urb *usbtest_alloc_urb(
struct usb_device *udev,
int pipe,
- unsigned long bytes
-)
+ unsigned long bytes,
+ unsigned transfer_flags,
+ unsigned offset)
{
struct urb *urb;
@@ -201,19 +204,46 @@ static struct urb *simple_alloc_urb(
urb->interval = (udev->speed == USB_SPEED_HIGH)
? (INTERRUPT_RATE << 3)
: INTERRUPT_RATE;
- urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags = transfer_flags;
if (usb_pipein(pipe))
urb->transfer_flags |= URB_SHORT_NOT_OK;
- urb->transfer_buffer = usb_alloc_coherent(udev, bytes, GFP_KERNEL,
- &urb->transfer_dma);
+
+ if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
+ urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
+ GFP_KERNEL, &urb->transfer_dma);
+ else
+ urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
+
if (!urb->transfer_buffer) {
usb_free_urb(urb);
- urb = NULL;
- } else
- memset(urb->transfer_buffer, 0, bytes);
+ return NULL;
+ }
+
+ /* To test unaligned transfers, add an offset and fill the
+ unused memory with a guard value */
+ if (offset) {
+ memset(urb->transfer_buffer, GUARD_BYTE, offset);
+ urb->transfer_buffer += offset;
+ if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
+ urb->transfer_dma += offset;
+ }
+
+ /* For inbound transfers use a guard byte so that the test fails if
+ the data is not correctly copied */
+ memset(urb->transfer_buffer,
+ usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
+ bytes);
return urb;
}
+static struct urb *simple_alloc_urb(
+ struct usb_device *udev,
+ int pipe,
+ unsigned long bytes)
+{
+ return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
+}
+
static unsigned pattern;
static unsigned mod_pattern;
module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
@@ -238,13 +268,38 @@ static inline void simple_fill_buf(struct urb *urb)
}
}
-static inline int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
+static inline unsigned buffer_offset(void *buf)
+{
+ return (unsigned)buf & (ARCH_KMALLOC_MINALIGN - 1);
+}
+
+static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
+{
+ u8 *buf = urb->transfer_buffer;
+ u8 *guard = buf - buffer_offset(buf);
+ unsigned i;
+
+ for (i = 0; guard < buf; i++, guard++) {
+ if (*guard != GUARD_BYTE) {
+ ERROR(tdev, "guard byte[%d] %d (not %d)\n",
+ i, *guard, GUARD_BYTE);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
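
buffer_offset() and check_guard_bytes() above implement the guard-byte technique behind the unaligned tests: allocate a little extra, poison the leading bytes with 0xA5, hand out an intentionally misaligned pointer, and verify the poison afterwards. Here is a self-contained sketch of the same idea; alloc_offset_buf(), check_guard() and the constants are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_GUARD_BYTE 0xA5

static unsigned char *alloc_offset_buf(size_t len, unsigned offset)
{
	unsigned char *raw = malloc(len + offset);

	if (!raw)
		return NULL;
	memset(raw, DEMO_GUARD_BYTE, offset);	/* guard area before the buffer */
	memset(raw + offset, 0, len);		/* the buffer handed to the test */
	return raw + offset;
}

static int check_guard(const unsigned char *buf, unsigned offset)
{
	unsigned i;

	for (i = 1; i <= offset; i++)
		if (buf[-(int)i] != DEMO_GUARD_BYTE)
			return -1;	/* something scribbled before the buffer */
	return 0;
}

int main(void)
{
	unsigned offset = 1;			/* odd address, as in tests 17-23 */
	unsigned char *buf = alloc_offset_buf(64, offset);

	if (!buf)
		return 1;
	memset(buf, 0x5a, 64);			/* simulate a transfer filling it */
	printf("guard intact: %s\n", check_guard(buf, offset) ? "no" : "yes");
	free(buf - offset);			/* free the original allocation */
	return 0;
}
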
+
+static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
{
unsigned i;
u8 expected;
u8 *buf = urb->transfer_buffer;
unsigned len = urb->actual_length;
+ int ret = check_guard_bytes(tdev, urb);
+ if (ret)
+ return ret;
+
for (i = 0; i < len; i++, buf++) {
switch (pattern) {
/* all-zeroes has no synchronization issues */
@@ -274,8 +329,16 @@ static inline int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
static void simple_free_urb(struct urb *urb)
{
- usb_free_coherent(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ unsigned offset = buffer_offset(urb->transfer_buffer);
+
+ if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
+ usb_free_coherent(
+ urb->dev,
+ urb->transfer_buffer_length + offset,
+ urb->transfer_buffer - offset,
+ urb->transfer_dma - offset);
+ else
+ kfree(urb->transfer_buffer - offset);
usb_free_urb(urb);
}
@@ -1256,7 +1319,7 @@ done:
* try whatever we're told to try.
*/
static int ctrl_out(struct usbtest_dev *dev,
- unsigned count, unsigned length, unsigned vary)
+ unsigned count, unsigned length, unsigned vary, unsigned offset)
{
unsigned i, j, len;
int retval;
@@ -1267,10 +1330,11 @@ static int ctrl_out(struct usbtest_dev *dev,
if (length < 1 || length > 0xffff || vary >= length)
return -EINVAL;
- buf = kmalloc(length, GFP_KERNEL);
+ buf = kmalloc(length + offset, GFP_KERNEL);
if (!buf)
return -ENOMEM;
+ buf += offset;
udev = testdev_to_usbdev(dev);
len = length;
retval = 0;
@@ -1337,7 +1401,7 @@ static int ctrl_out(struct usbtest_dev *dev,
ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
what, retval, i);
- kfree(buf);
+ kfree(buf - offset);
return retval;
}
@@ -1373,6 +1437,8 @@ static void iso_callback(struct urb *urb)
ctx->errors += urb->number_of_packets;
else if (urb->actual_length != urb->transfer_buffer_length)
ctx->errors++;
+ else if (check_guard_bytes(ctx->dev, urb) != 0)
+ ctx->errors++;
if (urb->status == 0 && ctx->count > (ctx->pending - 1)
&& !ctx->submit_error) {
@@ -1408,7 +1474,8 @@ static struct urb *iso_alloc_urb(
struct usb_device *udev,
int pipe,
struct usb_endpoint_descriptor *desc,
- long bytes
+ long bytes,
+ unsigned offset
)
{
struct urb *urb;
@@ -1428,13 +1495,24 @@ static struct urb *iso_alloc_urb(
urb->number_of_packets = packets;
urb->transfer_buffer_length = bytes;
- urb->transfer_buffer = usb_alloc_coherent(udev, bytes, GFP_KERNEL,
- &urb->transfer_dma);
+ urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
+ GFP_KERNEL,
+ &urb->transfer_dma);
if (!urb->transfer_buffer) {
usb_free_urb(urb);
return NULL;
}
- memset(urb->transfer_buffer, 0, bytes);
+ if (offset) {
+ memset(urb->transfer_buffer, GUARD_BYTE, offset);
+ urb->transfer_buffer += offset;
+ urb->transfer_dma += offset;
+ }
+ /* For inbound transfers use a guard byte so that the test fails if
+ the data is not correctly copied */
+ memset(urb->transfer_buffer,
+ usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
+ bytes);
+
for (i = 0; i < packets; i++) {
/* here, only the last packet will be short */
urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
@@ -1452,7 +1530,7 @@ static struct urb *iso_alloc_urb(
static int
test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
- int pipe, struct usb_endpoint_descriptor *desc)
+ int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
{
struct iso_context context;
struct usb_device *udev;
@@ -1480,7 +1558,7 @@ test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
for (i = 0; i < param->sglen; i++) {
urbs[i] = iso_alloc_urb(udev, pipe, desc,
- param->length);
+ param->length, offset);
if (!urbs[i]) {
status = -ENOMEM;
goto fail;
@@ -1542,6 +1620,26 @@ fail:
return status;
}
+static int test_unaligned_bulk(
+ struct usbtest_dev *tdev,
+ int pipe,
+ unsigned length,
+ int iterations,
+ unsigned transfer_flags,
+ const char *label)
+{
+ int retval;
+ struct urb *urb = usbtest_alloc_urb(
+ testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);
+
+ if (!urb)
+ return -ENOMEM;
+
+ retval = simple_io(tdev, urb, iterations, 0, 0, label);
+ simple_free_urb(urb);
+ return retval;
+}
+
/*-------------------------------------------------------------------------*/
/* We only have this one interface to user space, through usbfs.
@@ -1843,7 +1941,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
realworld ? 1 : 0, param->length,
param->vary);
retval = ctrl_out(dev, param->iterations,
- param->length, param->vary);
+ param->length, param->vary, 0);
break;
/* iso write tests */
@@ -1856,7 +1954,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
param->sglen, param->length);
/* FIRMWARE: iso sink */
retval = test_iso_queue(dev, param,
- dev->out_iso_pipe, dev->iso_out);
+ dev->out_iso_pipe, dev->iso_out, 0);
break;
/* iso read tests */
@@ -1869,13 +1967,103 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
param->sglen, param->length);
/* FIRMWARE: iso source */
retval = test_iso_queue(dev, param,
- dev->in_iso_pipe, dev->iso_in);
+ dev->in_iso_pipe, dev->iso_in, 0);
break;
/* FIXME unlink from queue (ring with N urbs) */
/* FIXME scatterlist cancel (needs helper thread) */
+ /* Tests for bulk I/O using DMA mapping by core and odd address */
+ case 17:
+ if (dev->out_pipe == 0)
+ break;
+ dev_info(&intf->dev,
+ "TEST 17: write odd addr %d bytes %u times core map\n",
+ param->length, param->iterations);
+
+ retval = test_unaligned_bulk(
+ dev, dev->out_pipe,
+ param->length, param->iterations,
+ 0, "test17");
+ break;
+
+ case 18:
+ if (dev->in_pipe == 0)
+ break;
+ dev_info(&intf->dev,
+ "TEST 18: read odd addr %d bytes %u times core map\n",
+ param->length, param->iterations);
+
+ retval = test_unaligned_bulk(
+ dev, dev->in_pipe,
+ param->length, param->iterations,
+ 0, "test18");
+ break;
+
+ /* Tests for bulk I/O using premapped coherent buffer and odd address */
+ case 19:
+ if (dev->out_pipe == 0)
+ break;
+ dev_info(&intf->dev,
+ "TEST 19: write odd addr %d bytes %u times premapped\n",
+ param->length, param->iterations);
+
+ retval = test_unaligned_bulk(
+ dev, dev->out_pipe,
+ param->length, param->iterations,
+ URB_NO_TRANSFER_DMA_MAP, "test19");
+ break;
+
+ case 20:
+ if (dev->in_pipe == 0)
+ break;
+ dev_info(&intf->dev,
+ "TEST 20: read odd addr %d bytes %u times premapped\n",
+ param->length, param->iterations);
+
+ retval = test_unaligned_bulk(
+ dev, dev->in_pipe,
+ param->length, param->iterations,
+ URB_NO_TRANSFER_DMA_MAP, "test20");
+ break;
+
+ /* control write tests with unaligned buffer */
+ case 21:
+ if (!dev->info->ctrl_out)
+ break;
+ dev_info(&intf->dev,
+ "TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
+ param->iterations,
+ realworld ? 1 : 0, param->length,
+ param->vary);
+ retval = ctrl_out(dev, param->iterations,
+ param->length, param->vary, 1);
+ break;
+
+ /* unaligned iso tests */
+ case 22:
+ if (dev->out_iso_pipe == 0 || param->sglen == 0)
+ break;
+ dev_info(&intf->dev,
+ "TEST 22: write %d iso odd, %d entries of %d bytes\n",
+ param->iterations,
+ param->sglen, param->length);
+ retval = test_iso_queue(dev, param,
+ dev->out_iso_pipe, dev->iso_out, 1);
+ break;
+
+ case 23:
+ if (dev->in_iso_pipe == 0 || param->sglen == 0)
+ break;
+ dev_info(&intf->dev,
+ "TEST 23: read %d iso odd, %d entries of %d bytes\n",
+ param->iterations,
+ param->sglen, param->length);
+ retval = test_iso_queue(dev, param,
+ dev->in_iso_pipe, dev->iso_in, 1);
+ break;
+
}
do_gettimeofday(&param->duration);
param->duration.tv_sec -= start.tv_sec;
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index a545d65f6e5..c302e1983c7 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -236,6 +236,9 @@ static void mon_text_event(struct mon_reader_text *rp, struct urb *urb,
fp++;
dp++;
}
+ /* Wasteful, but simple to understand: ISO 'C' is sparse. */
+ if (ev_type == 'C')
+ ep->length = urb->transfer_buffer_length;
}
ep->setup_flag = mon_text_get_setup(ep, urb, ev_type, rp->r.m_bus);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c292d5c499e..c2915133291 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -555,7 +555,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb->ep0_stage = MUSB_EP0_START;
musb->xceiv->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
- musb_platform_set_vbus(musb, 1);
handled = IRQ_HANDLED;
}
@@ -848,12 +847,16 @@ b_host:
DBG(1, "HNP: in %s, %d msec timeout\n",
otg_state_string(musb),
TA_WAIT_BCON(musb));
+#ifdef CONFIG_USB_MUSB_OTG
mod_timer(&musb->otg_timer, jiffies
+ msecs_to_jiffies(TA_WAIT_BCON(musb)));
+#endif
break;
case OTG_STATE_A_PERIPHERAL:
musb->ignore_disconnect = 0;
+#ifdef CONFIG_USB_MUSB_OTG
del_timer(&musb->otg_timer);
+#endif
musb_g_reset(musb);
break;
case OTG_STATE_B_WAIT_ACON:
@@ -1030,6 +1033,8 @@ static void musb_shutdown(struct platform_device *pdev)
struct musb *musb = dev_to_musb(&pdev->dev);
unsigned long flags;
+ pm_runtime_get_sync(musb->controller);
+
spin_lock_irqsave(&musb->lock, flags);
musb_platform_disable(musb);
musb_generic_disable(musb);
@@ -1040,6 +1045,8 @@ static void musb_shutdown(struct platform_device *pdev)
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
musb_platform_exit(musb);
+ pm_runtime_put(musb->controller);
+
/* FIXME power down */
}
@@ -1530,7 +1537,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) || \
+#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \
defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \
defined(CONFIG_ARCH_U5500)
@@ -1725,6 +1732,8 @@ musb_mode_store(struct device *dev, struct device_attribute *attr,
unsigned long flags;
int status;
+ pm_runtime_get_sync(musb->controller);
+
spin_lock_irqsave(&musb->lock, flags);
if (sysfs_streq(buf, "host"))
status = musb_platform_set_mode(musb, MUSB_HOST);
@@ -1736,6 +1745,8 @@ musb_mode_store(struct device *dev, struct device_attribute *attr,
status = -EINVAL;
spin_unlock_irqrestore(&musb->lock, flags);
+ pm_runtime_put_sync(musb->controller);
+
return (status == 0) ? n : status;
}
static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
@@ -1833,6 +1844,9 @@ static void musb_irq_work(struct work_struct *data)
struct musb *musb = container_of(data, struct musb, irq_work);
static int old_state;
+ if (musb->xceiv->state == OTG_STATE_A_IDLE)
+ musb_platform_set_vbus(musb, 1);
+
if (musb->xceiv->state != old_state) {
old_state = musb->xceiv->state;
sysfs_notify(&musb->controller->kobj, NULL, "mode");
@@ -1950,31 +1964,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
goto fail0;
}
- switch (plat->mode) {
- case MUSB_HOST:
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- break;
-#else
- goto bad_config;
-#endif
- case MUSB_PERIPHERAL:
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- break;
-#else
- goto bad_config;
-#endif
- case MUSB_OTG:
-#ifdef CONFIG_USB_MUSB_OTG
- break;
-#else
-bad_config:
-#endif
- default:
- dev_err(dev, "incompatible Kconfig role setting\n");
- status = -EINVAL;
- goto fail0;
- }
-
/* allocate */
musb = allocate_instance(dev, plat->config, ctrl);
if (!musb) {
@@ -1982,6 +1971,10 @@ bad_config:
goto fail0;
}
+ pm_runtime_use_autosuspend(musb->controller);
+ pm_runtime_set_autosuspend_delay(musb->controller, 200);
+ pm_runtime_enable(musb->controller);
+
spin_lock_init(&musb->lock);
musb->board_mode = plat->mode;
musb->board_set_power = plat->set_power;
@@ -2117,6 +2110,8 @@ bad_config:
if (status < 0)
goto fail3;
+ pm_runtime_put(musb->controller);
+
status = musb_init_debugfs(musb);
if (status < 0)
goto fail4;
@@ -2404,9 +2399,41 @@ static int musb_resume_noirq(struct device *dev)
return 0;
}
+static int musb_runtime_suspend(struct device *dev)
+{
+ struct musb *musb = dev_to_musb(dev);
+
+ musb_save_context(musb);
+
+ return 0;
+}
+
+static int musb_runtime_resume(struct device *dev)
+{
+ struct musb *musb = dev_to_musb(dev);
+ static int first = 1;
+
+ /*
+ * When pm_runtime_get_sync() is called for the first time during
+ * driver init, some of the structures used by the restore function
+ * are not yet initialized. The clock still has to be enabled before
+ * any register access, though, so pm_runtime_get_sync() must be
+ * called anyway; we simply skip the restore on that first resume,
+ * since a context restore without a preceding save makes no sense.
+ */
+ if (!first)
+ musb_restore_context(musb);
+ first = 0;
+
+ return 0;
+}
+
static const struct dev_pm_ops musb_dev_pm_ops = {
.suspend = musb_suspend,
.resume_noirq = musb_resume_noirq,
+ .runtime_suspend = musb_runtime_suspend,
+ .runtime_resume = musb_runtime_resume,
};
#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
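
The musb_core.c hunks above convert the controller to runtime PM: probe enables autosuspend with a 200 ms delay, register-touching paths such as musb_shutdown() and musb_mode_store() bracket their work with pm_runtime_get_sync()/pm_runtime_put(), and the new runtime callbacks save and restore context. A minimal sketch of that pattern for a hypothetical platform driver (all names here are illustrative, not musb's; demo_pm_ops would be plugged into the driver's .driver.pm field):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int demo_runtime_suspend(struct device *dev)
	{
		/* clocks are about to be gated: save controller context here */
		return 0;
	}

	static int demo_runtime_resume(struct device *dev)
	{
		/* clocks are back on: restore the previously saved context */
		return 0;
	}

	static const struct dev_pm_ops demo_pm_ops = {
		SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
	};

	static int demo_probe(struct platform_device *pdev)
	{
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_set_autosuspend_delay(&pdev->dev, 200);
		pm_runtime_enable(&pdev->dev);

		pm_runtime_get_sync(&pdev->dev);	/* registers now accessible */
		/* ... touch hardware ... */
		pm_runtime_mark_last_busy(&pdev->dev);
		pm_runtime_put_autosuspend(&pdev->dev);	/* idle ~200 ms later */

		return 0;
	}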
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index e6400be8a0f..4bd9e2145ee 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -212,8 +212,8 @@ enum musb_g_ep0_state {
* directly with the "flat" model, or after setting up an index register.
*/
-#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
- || defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_BLACKFIN) \
+#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_SOC_OMAP2430) \
+ || defined(CONFIG_SOC_OMAP3430) || defined(CONFIG_BLACKFIN) \
|| defined(CONFIG_ARCH_OMAP4)
/* REVISIT indexed access seemed to
* misbehave (on DaVinci) for at least peripheral IN ...
@@ -328,7 +328,7 @@ struct musb_hw_ep {
#endif
};
-static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep)
+static inline struct musb_request *next_in_request(struct musb_hw_ep *hw_ep)
{
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
return next_request(&hw_ep->ep_in);
@@ -337,7 +337,7 @@ static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep)
#endif
}
-static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep)
+static inline struct musb_request *next_out_request(struct musb_hw_ep *hw_ep)
{
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
return next_request(&hw_ep->ep_out);
@@ -358,10 +358,6 @@ struct musb_csr_regs {
struct musb_context_registers {
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
- defined(CONFIG_ARCH_OMAP4)
- u32 otg_sysconfig, otg_forcestandby;
-#endif
u8 power;
u16 intrtxe, intrrxe;
u8 intrusbe;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 2fe304611dc..5c7b321d395 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -189,7 +189,7 @@ __acquires(ep->musb->lock)
req = to_musb_request(request);
- list_del(&request->list);
+ list_del(&req->list);
if (req->request.status == -EINPROGRESS)
req->request.status = status;
musb = req->musb;
@@ -251,9 +251,8 @@ static void nuke(struct musb_ep *ep, const int status)
ep->dma = NULL;
}
- while (!list_empty(&(ep->req_list))) {
- req = container_of(ep->req_list.next, struct musb_request,
- request.list);
+ while (!list_empty(&ep->req_list)) {
+ req = list_first_entry(&ep->req_list, struct musb_request, list);
musb_g_giveback(ep, &req->request, status);
}
}
@@ -485,6 +484,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
void musb_g_tx(struct musb *musb, u8 epnum)
{
u16 csr;
+ struct musb_request *req;
struct usb_request *request;
u8 __iomem *mbase = musb->mregs;
struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
@@ -492,7 +492,8 @@ void musb_g_tx(struct musb *musb, u8 epnum)
struct dma_channel *dma;
musb_ep_select(mbase, epnum);
- request = next_request(musb_ep);
+ req = next_request(musb_ep);
+ request = &req->request;
csr = musb_readw(epio, MUSB_TXCSR);
DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
@@ -571,15 +572,15 @@ void musb_g_tx(struct musb *musb, u8 epnum)
if (request->actual == request->length) {
musb_g_giveback(musb_ep, request, 0);
- request = musb_ep->desc ? next_request(musb_ep) : NULL;
- if (!request) {
+ req = musb_ep->desc ? next_request(musb_ep) : NULL;
+ if (!req) {
DBG(4, "%s idle now\n",
musb_ep->end_point.name);
return;
}
}
- txstate(musb, to_musb_request(request));
+ txstate(musb, req);
}
}
@@ -821,6 +822,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
void musb_g_rx(struct musb *musb, u8 epnum)
{
u16 csr;
+ struct musb_request *req;
struct usb_request *request;
void __iomem *mbase = musb->mregs;
struct musb_ep *musb_ep;
@@ -835,10 +837,12 @@ void musb_g_rx(struct musb *musb, u8 epnum)
musb_ep_select(mbase, epnum);
- request = next_request(musb_ep);
- if (!request)
+ req = next_request(musb_ep);
+ if (!req)
return;
+ request = &req->request;
+
csr = musb_readw(epio, MUSB_RXCSR);
dma = is_dma_capable() ? musb_ep->dma : NULL;
@@ -914,15 +918,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
#endif
musb_g_giveback(musb_ep, request, 0);
- request = next_request(musb_ep);
- if (!request)
+ req = next_request(musb_ep);
+ if (!req)
return;
}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
exit:
#endif
/* Analyze request */
- rxstate(musb, to_musb_request(request));
+ rxstate(musb, req);
}
/* ------------------------------------------------------------ */
@@ -974,7 +978,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
ok = musb->hb_iso_rx;
if (!ok) {
- DBG(4, "%s: not support ISO high bandwidth\n", __func__);
+ DBG(4, "no support for high bandwidth ISO\n");
goto fail;
}
musb_ep->hb_mult = (tmp >> 11) & 3;
@@ -998,7 +1002,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
goto fail;
if (tmp > hw_ep->max_packet_sz_tx) {
- DBG(4, "%s: packet size beyond hw fifo size\n", __func__);
+ DBG(4, "packet size beyond hardware FIFO size\n");
goto fail;
}
@@ -1038,7 +1042,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
goto fail;
if (tmp > hw_ep->max_packet_sz_rx) {
- DBG(4, "%s: packet size beyond hw fifo size\n", __func__);
+ DBG(4, "packet size beyond hardware FIFO size\n");
goto fail;
}
@@ -1171,7 +1175,6 @@ struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
return NULL;
}
- INIT_LIST_HEAD(&request->request.list);
request->request.dma = DMA_ADDR_INVALID;
request->epnum = musb_ep->current_epnum;
request->ep = musb_ep;
@@ -1257,10 +1260,10 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
}
/* add request to the list */
- list_add_tail(&(request->request.list), &(musb_ep->req_list));
+ list_add_tail(&request->list, &musb_ep->req_list);
/* it this is the head of the queue, start i/o ... */
- if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
+ if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
musb_ep_restart(musb, request);
cleanup:
@@ -1271,7 +1274,8 @@ cleanup:
static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
struct musb_ep *musb_ep = to_musb_ep(ep);
- struct usb_request *r;
+ struct musb_request *req = to_musb_request(request);
+ struct musb_request *r;
unsigned long flags;
int status = 0;
struct musb *musb = musb_ep->musb;
@@ -1282,10 +1286,10 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
spin_lock_irqsave(&musb->lock, flags);
list_for_each_entry(r, &musb_ep->req_list, list) {
- if (r == request)
+ if (r == req)
break;
}
- if (r != request) {
+ if (r != req) {
DBG(3, "request %p not queued to %s\n", request, ep->name);
status = -EINVAL;
goto done;
@@ -1349,7 +1353,7 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value)
musb_ep_select(mbase, epnum);
- request = to_musb_request(next_request(musb_ep));
+ request = next_request(musb_ep);
if (value) {
if (request) {
DBG(3, "request in progress, cannot halt %s\n",
@@ -1801,90 +1805,105 @@ void musb_gadget_cleanup(struct musb *musb)
int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
- int retval;
- unsigned long flags;
- struct musb *musb = the_gadget;
+ struct musb *musb = the_gadget;
+ unsigned long flags;
+ int retval = -EINVAL;
if (!driver
|| driver->speed != USB_SPEED_HIGH
|| !bind || !driver->setup)
- return -EINVAL;
+ goto err0;
/* driver must be initialized to support peripheral mode */
if (!musb) {
- DBG(1, "%s, no dev??\n", __func__);
- return -ENODEV;
+ DBG(1, "no dev??\n");
+ retval = -ENODEV;
+ goto err0;
}
+ pm_runtime_get_sync(musb->controller);
+
DBG(3, "registering driver %s\n", driver->function);
- spin_lock_irqsave(&musb->lock, flags);
if (musb->gadget_driver) {
DBG(1, "%s is already bound to %s\n",
musb_driver_name,
musb->gadget_driver->driver.name);
retval = -EBUSY;
- } else {
- musb->gadget_driver = driver;
- musb->g.dev.driver = &driver->driver;
- driver->driver.bus = NULL;
- musb->softconnect = 1;
- retval = 0;
+ goto err0;
}
+ spin_lock_irqsave(&musb->lock, flags);
+ musb->gadget_driver = driver;
+ musb->g.dev.driver = &driver->driver;
+ driver->driver.bus = NULL;
+ musb->softconnect = 1;
spin_unlock_irqrestore(&musb->lock, flags);
- if (retval == 0) {
- retval = bind(&musb->g);
- if (retval != 0) {
- DBG(3, "bind to driver %s failed --> %d\n",
- driver->driver.name, retval);
- musb->gadget_driver = NULL;
- musb->g.dev.driver = NULL;
- }
+ retval = bind(&musb->g);
+ if (retval) {
+ DBG(3, "bind to driver %s failed --> %d\n",
+ driver->driver.name, retval);
+ goto err1;
+ }
- spin_lock_irqsave(&musb->lock, flags);
+ spin_lock_irqsave(&musb->lock, flags);
- otg_set_peripheral(musb->xceiv, &musb->g);
- musb->xceiv->state = OTG_STATE_B_IDLE;
- musb->is_active = 1;
+ otg_set_peripheral(musb->xceiv, &musb->g);
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->is_active = 1;
- /* FIXME this ignores the softconnect flag. Drivers are
- * allowed hold the peripheral inactive until for example
- * userspace hooks up printer hardware or DSP codecs, so
- * hosts only see fully functional devices.
- */
+ /*
+ * FIXME this ignores the softconnect flag. Drivers are
+ * allowed hold the peripheral inactive until for example
+ * userspace hooks up printer hardware or DSP codecs, so
+ * hosts only see fully functional devices.
+ */
- if (!is_otg_enabled(musb))
- musb_start(musb);
+ if (!is_otg_enabled(musb))
+ musb_start(musb);
- otg_set_peripheral(musb->xceiv, &musb->g);
+ otg_set_peripheral(musb->xceiv, &musb->g);
- spin_unlock_irqrestore(&musb->lock, flags);
+ spin_unlock_irqrestore(&musb->lock, flags);
- if (is_otg_enabled(musb)) {
- struct usb_hcd *hcd = musb_to_hcd(musb);
+ if (is_otg_enabled(musb)) {
+ struct usb_hcd *hcd = musb_to_hcd(musb);
- DBG(3, "OTG startup...\n");
+ DBG(3, "OTG startup...\n");
- /* REVISIT: funcall to other code, which also
- * handles power budgeting ... this way also
- * ensures HdrcStart is indirectly called.
- */
- retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
- if (retval < 0) {
- DBG(1, "add_hcd failed, %d\n", retval);
- spin_lock_irqsave(&musb->lock, flags);
- otg_set_peripheral(musb->xceiv, NULL);
- musb->gadget_driver = NULL;
- musb->g.dev.driver = NULL;
- spin_unlock_irqrestore(&musb->lock, flags);
- } else {
- hcd->self.uses_pio_for_control = 1;
- }
+ /* REVISIT: funcall to other code, which also
+ * handles power budgeting ... this way also
+ * ensures HdrcStart is indirectly called.
+ */
+ retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
+ if (retval < 0) {
+ DBG(1, "add_hcd failed, %d\n", retval);
+ goto err2;
+ }
+
+ if ((musb->xceiv->last_event == USB_EVENT_ID)
+ && musb->xceiv->set_vbus)
+ otg_set_vbus(musb->xceiv, 1);
+
+ hcd->self.uses_pio_for_control = 1;
+
+ if (musb->xceiv->last_event == USB_EVENT_NONE)
+ pm_runtime_put(musb->controller);
+
}
+ return 0;
+
+err2:
+ if (!is_otg_enabled(musb))
+ musb_stop(musb);
+
+err1:
+ musb->gadget_driver = NULL;
+ musb->g.dev.driver = NULL;
+
+err0:
return retval;
}
EXPORT_SYMBOL(usb_gadget_probe_driver);
@@ -1939,14 +1958,20 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
*/
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
- unsigned long flags;
- int retval = 0;
struct musb *musb = the_gadget;
+ unsigned long flags;
if (!driver || !driver->unbind || !musb)
return -EINVAL;
- /* REVISIT always use otg_set_peripheral() here too;
+ if (!musb->gadget_driver)
+ return -EINVAL;
+
+ if (musb->xceiv->last_event == USB_EVENT_NONE)
+ pm_runtime_get_sync(musb->controller);
+
+ /*
+ * REVISIT always use otg_set_peripheral() here too;
* this needs to shut down the OTG engine.
*/
@@ -1956,29 +1981,26 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
musb_hnp_stop(musb);
#endif
- if (musb->gadget_driver == driver) {
+ (void) musb_gadget_vbus_draw(&musb->g, 0);
- (void) musb_gadget_vbus_draw(&musb->g, 0);
+ musb->xceiv->state = OTG_STATE_UNDEFINED;
+ stop_activity(musb, driver);
+ otg_set_peripheral(musb->xceiv, NULL);
- musb->xceiv->state = OTG_STATE_UNDEFINED;
- stop_activity(musb, driver);
- otg_set_peripheral(musb->xceiv, NULL);
+ DBG(3, "unregistering driver %s\n", driver->function);
- DBG(3, "unregistering driver %s\n", driver->function);
- spin_unlock_irqrestore(&musb->lock, flags);
- driver->unbind(&musb->g);
- spin_lock_irqsave(&musb->lock, flags);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ driver->unbind(&musb->g);
+ spin_lock_irqsave(&musb->lock, flags);
- musb->gadget_driver = NULL;
- musb->g.dev.driver = NULL;
+ musb->gadget_driver = NULL;
+ musb->g.dev.driver = NULL;
- musb->is_active = 0;
- musb_platform_try_idle(musb, 0);
- } else
- retval = -EINVAL;
+ musb->is_active = 0;
+ musb_platform_try_idle(musb, 0);
spin_unlock_irqrestore(&musb->lock, flags);
- if (is_otg_enabled(musb) && retval == 0) {
+ if (is_otg_enabled(musb)) {
usb_remove_hcd(musb_to_hcd(musb));
/* FIXME we need to be able to register another
* gadget driver here and have everything work;
@@ -1986,7 +2008,12 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
*/
}
- return retval;
+ if (!is_otg_enabled(musb))
+ musb_stop(musb);
+
+ pm_runtime_put(musb->controller);
+
+ return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index a55354fbccf..66b7c5e0fb4 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -35,6 +35,8 @@
#ifndef __MUSB_GADGET_H
#define __MUSB_GADGET_H
+#include <linux/list.h>
+
enum buffer_map_state {
UN_MAPPED = 0,
PRE_MAPPED,
@@ -43,6 +45,7 @@ enum buffer_map_state {
struct musb_request {
struct usb_request request;
+ struct list_head list;
struct musb_ep *ep;
struct musb *musb;
u8 tx; /* endpoint direction */
@@ -94,13 +97,13 @@ static inline struct musb_ep *to_musb_ep(struct usb_ep *ep)
return ep ? container_of(ep, struct musb_ep, end_point) : NULL;
}
-static inline struct usb_request *next_request(struct musb_ep *ep)
+static inline struct musb_request *next_request(struct musb_ep *ep)
{
struct list_head *queue = &ep->req_list;
if (list_empty(queue))
return NULL;
- return container_of(queue->next, struct usb_request, list);
+ return container_of(queue->next, struct musb_request, list);
}
extern void musb_g_tx(struct musb *musb, u8 epnum);
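
The gadget-side rework above gives struct musb_request its own list_head instead of borrowing the one inside struct usb_request, so the endpoint queue only ever carries musb_request nodes and next_request() can return the correct type. The underlying idiom -- a list_head embedded in a wrapper struct, recovered with list_first_entry() -- looks like this standalone sketch (names invented for illustration):

	#include <linux/list.h>

	struct demo_request {
		int			tag;
		struct list_head	list;	/* node embedded in the wrapper */
	};

	static LIST_HEAD(demo_queue);

	static struct demo_request *demo_next_request(void)
	{
		if (list_empty(&demo_queue))
			return NULL;
		/* container_of() under the hood: list node back to wrapper */
		return list_first_entry(&demo_queue, struct demo_request, list);
	}

	static void demo_queue_request(struct demo_request *req)
	{
		list_add_tail(&req->list, &demo_queue);
	}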
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 6dd03f4c5f4..75a542e42fd 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -304,8 +304,7 @@ __acquires(musb->lock)
}
/* Maybe start the first request in the queue */
- request = to_musb_request(
- next_request(musb_ep));
+ request = next_request(musb_ep);
if (!musb_ep->busy && request) {
DBG(3, "restarting the request\n");
musb_ep_restart(musb, request);
@@ -491,10 +490,12 @@ stall:
static void ep0_rxstate(struct musb *musb)
{
void __iomem *regs = musb->control_ep->regs;
+ struct musb_request *request;
struct usb_request *req;
u16 count, csr;
- req = next_ep0_request(musb);
+ request = next_ep0_request(musb);
+ req = &request->request;
/* read packet and ack; or stall because of gadget driver bug:
* should have provided the rx buffer before setup() returned.
@@ -544,17 +545,20 @@ static void ep0_rxstate(struct musb *musb)
static void ep0_txstate(struct musb *musb)
{
void __iomem *regs = musb->control_ep->regs;
- struct usb_request *request = next_ep0_request(musb);
+ struct musb_request *req = next_ep0_request(musb);
+ struct usb_request *request;
u16 csr = MUSB_CSR0_TXPKTRDY;
u8 *fifo_src;
u8 fifo_count;
- if (!request) {
+ if (!req) {
/* WARN_ON(1); */
DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
return;
}
+ request = &req->request;
+
/* load the data */
fifo_src = (u8 *) request->buf + request->actual;
fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
@@ -598,7 +602,7 @@ static void ep0_txstate(struct musb *musb)
static void
musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
{
- struct usb_request *r;
+ struct musb_request *r;
void __iomem *regs = musb->control_ep->regs;
musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);
@@ -616,7 +620,7 @@ musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
/* clean up any leftover transfers */
r = next_ep0_request(musb);
if (r)
- musb_g_ep0_giveback(musb, r);
+ musb_g_ep0_giveback(musb, &r->request);
/* For zero-data requests we want to delay the STATUS stage to
* avoid SETUPEND errors. If we read data (OUT), delay accepting
@@ -758,11 +762,11 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
case MUSB_EP0_STAGE_STATUSOUT:
/* end of sequence #1: write to host (TX state) */
{
- struct usb_request *req;
+ struct musb_request *req;
req = next_ep0_request(musb);
if (req)
- musb_g_ep0_giveback(musb, req);
+ musb_g_ep0_giveback(musb, &req->request);
}
/*
@@ -961,7 +965,7 @@ musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
}
/* add request to the list */
- list_add_tail(&(req->request.list), &(ep->req_list));
+ list_add_tail(&req->list, &ep->req_list);
DBG(3, "queue to %s (%s), length=%d\n",
ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 0f523d7db57..5eef4a8847d 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1335,7 +1335,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
if (length > qh->maxpacket)
length = qh->maxpacket;
/* Unmap the buffer so that CPU can use it */
- unmap_urb_for_dma(musb_to_hcd(musb), urb);
+ usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
qh->segsize = length;
@@ -1757,7 +1757,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
if (!dma) {
/* Unmap the buffer so that CPU can use it */
- unmap_urb_for_dma(musb_to_hcd(musb), urb);
+ usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
done = musb_host_packet_rx(musb, urb,
epnum, iso_err);
DBG(6, "read %spacket\n", done ? "last " : "");
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index b46d1877e28..489104a5ae1 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -305,8 +305,8 @@ int musb_hub_control(
desc->bHubContrCurrent = 0;
/* workaround bogus struct definition */
- desc->DeviceRemovable[0] = 0x02; /* port 1 */
- desc->DeviceRemovable[1] = 0xff;
+ desc->u.hs.DeviceRemovable[0] = 0x02; /* port 1 */
+ desc->u.hs.DeviceRemovable[1] = 0xff;
}
break;
case GetHubStatus:
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index 21056c924c7..320fd4afb93 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -31,7 +31,7 @@
*
*/
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430)
#include "omap2430.h"
#endif
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index bc8badd1689..50d2694409d 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -33,6 +33,8 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
#include "musb_core.h"
#include "omap2430.h"
@@ -40,7 +42,6 @@
struct omap2430_glue {
struct device *dev;
struct platform_device *musb;
- struct clk *clk;
};
#define glue_to_musb(g) platform_get_drvdata(g->musb)
@@ -216,20 +217,12 @@ static inline void omap2430_low_level_exit(struct musb *musb)
l = musb_readl(musb->mregs, OTG_FORCESTDBY);
l |= ENABLEFORCE; /* enable MSTANDBY */
musb_writel(musb->mregs, OTG_FORCESTDBY, l);
-
- l = musb_readl(musb->mregs, OTG_SYSCONFIG);
- l |= ENABLEWAKEUP; /* enable wakeup */
- musb_writel(musb->mregs, OTG_SYSCONFIG, l);
}
static inline void omap2430_low_level_init(struct musb *musb)
{
u32 l;
- l = musb_readl(musb->mregs, OTG_SYSCONFIG);
- l &= ~ENABLEWAKEUP; /* disable wakeup */
- musb_writel(musb->mregs, OTG_SYSCONFIG, l);
-
l = musb_readl(musb->mregs, OTG_FORCESTDBY);
l &= ~ENABLEFORCE; /* disable MSTANDBY */
musb_writel(musb->mregs, OTG_FORCESTDBY, l);
@@ -251,31 +244,41 @@ static int musb_otg_notifications(struct notifier_block *nb,
if (is_otg_enabled(musb)) {
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
if (musb->gadget_driver) {
+ pm_runtime_get_sync(musb->controller);
otg_init(musb->xceiv);
-
- if (data->interface_type ==
- MUSB_INTERFACE_UTMI)
- omap2430_musb_set_vbus(musb, 1);
-
+ omap2430_musb_set_vbus(musb, 1);
}
#endif
} else {
+ pm_runtime_get_sync(musb->controller);
otg_init(musb->xceiv);
- if (data->interface_type ==
- MUSB_INTERFACE_UTMI)
- omap2430_musb_set_vbus(musb, 1);
+ omap2430_musb_set_vbus(musb, 1);
}
break;
case USB_EVENT_VBUS:
DBG(4, "VBUS Connect\n");
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ if (musb->gadget_driver)
+ pm_runtime_get_sync(musb->controller);
+#endif
+
otg_init(musb->xceiv);
break;
case USB_EVENT_NONE:
DBG(4, "VBUS Disconnect\n");
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ if (is_otg_enabled(musb))
+ if (musb->gadget_driver)
+#endif
+ {
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ }
+
if (data->interface_type == MUSB_INTERFACE_UTMI) {
if (musb->xceiv->set_vbus)
otg_set_vbus(musb->xceiv, 0);
@@ -307,22 +310,11 @@ static int omap2430_musb_init(struct musb *musb)
return -ENODEV;
}
- omap2430_low_level_init(musb);
-
- l = musb_readl(musb->mregs, OTG_SYSCONFIG);
- l &= ~ENABLEWAKEUP; /* disable wakeup */
- l &= ~NOSTDBY; /* remove possible nostdby */
- l |= SMARTSTDBY; /* enable smart standby */
- l &= ~AUTOIDLE; /* disable auto idle */
- l &= ~NOIDLE; /* remove possible noidle */
- l |= SMARTIDLE; /* enable smart idle */
- /*
- * MUSB AUTOIDLE don't work in 3430.
- * Workaround by Richard Woodruff/TI
- */
- if (!cpu_is_omap3430())
- l |= AUTOIDLE; /* enable auto idle */
- musb_writel(musb->mregs, OTG_SYSCONFIG, l);
+ status = pm_runtime_get_sync(dev);
+ if (status < 0) {
+ dev_err(dev, "pm_runtime_get_sync FAILED");
+ goto err1;
+ }
l = musb_readl(musb->mregs, OTG_INTERFSEL);
@@ -350,14 +342,58 @@ static int omap2430_musb_init(struct musb *musb)
if (status)
DBG(1, "notification register failed\n");
- /* check whether cable is already connected */
- if (musb->xceiv->state ==OTG_STATE_B_IDLE)
- musb_otg_notifications(&musb->nb, 1,
- musb->xceiv->gadget);
-
setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
return 0;
+
+err1:
+ pm_runtime_disable(dev);
+ return status;
+}
+
+static void omap2430_musb_enable(struct musb *musb)
+{
+ u8 devctl;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *pdata = dev->platform_data;
+ struct omap_musb_board_data *data = pdata->board_data;
+
+ switch (musb->xceiv->last_event) {
+
+ case USB_EVENT_ID:
+ otg_init(musb->xceiv);
+ if (data->interface_type == MUSB_INTERFACE_UTMI) {
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ /* start the session */
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ while (musb_readb(musb->mregs, MUSB_DEVCTL) &
+ MUSB_DEVCTL_BDEVICE) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(musb->controller,
+ "configured as A device timeout");
+ break;
+ }
+ }
+ }
+ break;
+
+ case USB_EVENT_VBUS:
+ otg_init(musb->xceiv);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void omap2430_musb_disable(struct musb *musb)
+{
+ if (musb->xceiv->last_event)
+ otg_shutdown(musb->xceiv);
}
static int omap2430_musb_exit(struct musb *musb)
@@ -378,6 +414,9 @@ static const struct musb_platform_ops omap2430_ops = {
.try_idle = omap2430_musb_try_idle,
.set_vbus = omap2430_musb_set_vbus,
+
+ .enable = omap2430_musb_enable,
+ .disable = omap2430_musb_disable,
};
static u64 omap2430_dmamask = DMA_BIT_MASK(32);
@@ -387,8 +426,6 @@ static int __init omap2430_probe(struct platform_device *pdev)
struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
struct platform_device *musb;
struct omap2430_glue *glue;
- struct clk *clk;
-
int ret = -ENOMEM;
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
@@ -403,26 +440,12 @@ static int __init omap2430_probe(struct platform_device *pdev)
goto err1;
}
- clk = clk_get(&pdev->dev, "ick");
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "failed to get clock\n");
- ret = PTR_ERR(clk);
- goto err2;
- }
-
- ret = clk_enable(clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable clock\n");
- goto err3;
- }
-
musb->dev.parent = &pdev->dev;
musb->dev.dma_mask = &omap2430_dmamask;
musb->dev.coherent_dma_mask = omap2430_dmamask;
glue->dev = &pdev->dev;
glue->musb = musb;
- glue->clk = clk;
pdata->platform_ops = &omap2430_ops;
@@ -432,28 +455,24 @@ static int __init omap2430_probe(struct platform_device *pdev)
pdev->num_resources);
if (ret) {
dev_err(&pdev->dev, "failed to add resources\n");
- goto err4;
+ goto err2;
}
ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
if (ret) {
dev_err(&pdev->dev, "failed to add platform_data\n");
- goto err4;
+ goto err2;
}
ret = platform_device_add(musb);
if (ret) {
dev_err(&pdev->dev, "failed to register musb device\n");
- goto err4;
+ goto err2;
}
- return 0;
-
-err4:
- clk_disable(clk);
+ pm_runtime_enable(&pdev->dev);
-err3:
- clk_put(clk);
+ return 0;
err2:
platform_device_put(musb);
@@ -471,61 +490,40 @@ static int __exit omap2430_remove(struct platform_device *pdev)
platform_device_del(glue->musb);
platform_device_put(glue->musb);
- clk_disable(glue->clk);
- clk_put(glue->clk);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
kfree(glue);
return 0;
}
#ifdef CONFIG_PM
-static void omap2430_save_context(struct musb *musb)
-{
- musb->context.otg_sysconfig = musb_readl(musb->mregs, OTG_SYSCONFIG);
- musb->context.otg_forcestandby = musb_readl(musb->mregs, OTG_FORCESTDBY);
-}
-
-static void omap2430_restore_context(struct musb *musb)
-{
- musb_writel(musb->mregs, OTG_SYSCONFIG, musb->context.otg_sysconfig);
- musb_writel(musb->mregs, OTG_FORCESTDBY, musb->context.otg_forcestandby);
-}
-static int omap2430_suspend(struct device *dev)
+static int omap2430_runtime_suspend(struct device *dev)
{
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
omap2430_low_level_exit(musb);
otg_set_suspend(musb->xceiv, 1);
- omap2430_save_context(musb);
- clk_disable(glue->clk);
return 0;
}
-static int omap2430_resume(struct device *dev)
+static int omap2430_runtime_resume(struct device *dev)
{
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
- int ret;
-
- ret = clk_enable(glue->clk);
- if (ret) {
- dev_err(dev, "faled to enable clock\n");
- return ret;
- }
omap2430_low_level_init(musb);
- omap2430_restore_context(musb);
otg_set_suspend(musb->xceiv, 0);
return 0;
}
static struct dev_pm_ops omap2430_pm_ops = {
- .suspend = omap2430_suspend,
- .resume = omap2430_resume,
+ .runtime_suspend = omap2430_runtime_suspend,
+ .runtime_resume = omap2430_runtime_resume,
};
#define DEV_PM_OPS (&omap2430_pm_ops)
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index c061a88f2b0..99cb541e4ef 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -680,7 +680,7 @@ dma_controller_create(struct musb *musb, void __iomem *base)
tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
if (!tusb_dma)
- goto cleanup;
+ goto out;
tusb_dma->musb = musb;
tusb_dma->tbase = musb->ctrl_base;
@@ -721,6 +721,6 @@ dma_controller_create(struct musb *musb, void __iomem *base)
cleanup:
dma_controller_destroy(&tusb_dma->controller);
-
+out:
return NULL;
}
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 9ffc8237fb4..daf3e5f1a0e 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -49,6 +49,13 @@ config USB_ULPI
Enable this to support ULPI connected USB OTG transceivers which
are likely found on embedded boards.
+config USB_ULPI_VIEWPORT
+ bool
+ depends on USB_ULPI
+ help
+ Provides read/write operations to the ULPI phy register set for
+ controllers with a viewport register (e.g. Chipidea/ARC controllers).
+
config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
depends on TWL4030_CORE && REGULATOR_TWL4030
@@ -93,7 +100,7 @@ config USB_LANGWELL_OTG
To compile this driver as a module, choose M here: the
module will be called langwell_otg.
-config USB_MSM_OTG_72K
+config USB_MSM_OTG
tristate "OTG support for Qualcomm on-chip USB controller"
depends on (USB || USB_GADGET) && ARCH_MSM
select USB_OTG_UTILS
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index a520e715cfd..e22d917de01 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -16,5 +16,6 @@ obj-$(CONFIG_TWL6030_USB) += twl6030-usb.o
obj-$(CONFIG_USB_LANGWELL_OTG) += langwell_otg.o
obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o
obj-$(CONFIG_USB_ULPI) += ulpi.o
-obj-$(CONFIG_USB_MSM_OTG_72K) += msm72k_otg.o
+obj-$(CONFIG_USB_ULPI_VIEWPORT) += ulpi_viewport.o
+obj-$(CONFIG_USB_MSM_OTG) += msm_otg.o
obj-$(CONFIG_AB8500_USB) += ab8500-usb.o
diff --git a/drivers/usb/otg/ab8500-usb.c b/drivers/usb/otg/ab8500-usb.c
index d14736b3107..07ccea9ada4 100644
--- a/drivers/usb/otg/ab8500-usb.c
+++ b/drivers/usb/otg/ab8500-usb.c
@@ -212,7 +212,7 @@ static int ab8500_usb_link_status_update(struct ab8500_usb *ab)
break;
}
- blocking_notifier_call_chain(&ab->otg.notifier, event, v);
+ atomic_notifier_call_chain(&ab->otg.notifier, event, v);
return 0;
}
@@ -281,7 +281,7 @@ static int ab8500_usb_set_power(struct otg_transceiver *otg, unsigned mA)
ab->vbus_draw = mA;
if (mA)
- blocking_notifier_call_chain(&ab->otg.notifier,
+ atomic_notifier_call_chain(&ab->otg.notifier,
USB_EVENT_ENUMERATED, ab->otg.gadget);
return 0;
}
@@ -500,7 +500,7 @@ static int __devinit ab8500_usb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ab);
- BLOCKING_INIT_NOTIFIER_HEAD(&ab->otg.notifier);
+ ATOMIC_INIT_NOTIFIER_HEAD(&ab->otg.notifier);
/* v1: Wait for link status to become stable.
* all: Updates form set_host and set_peripheral as they are atomic.
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index e00fa1b22ec..8c6fdef61d1 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -1510,7 +1510,7 @@ isp1301_start_hnp(struct otg_transceiver *dev)
/*-------------------------------------------------------------------------*/
-static int __init
+static int __devinit
isp1301_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
int status;
diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c
index 9fea48264fa..7f9b8cd4514 100644
--- a/drivers/usb/otg/langwell_otg.c
+++ b/drivers/usb/otg/langwell_otg.c
@@ -174,50 +174,24 @@ static int langwell_otg_set_power(struct otg_transceiver *otg,
return 0;
}
-/* A-device drives vbus, controlled through PMIC CHRGCNTL register*/
+/* A-device drives vbus, controlled through IPC commands */
static int langwell_otg_set_vbus(struct otg_transceiver *otg, bool enabled)
{
struct langwell_otg *lnw = the_transceiver;
- u8 r;
+ u8 sub_id;
dev_dbg(lnw->dev, "%s <--- %s\n", __func__, enabled ? "on" : "off");
- /* FIXME: surely we should cache this on the first read. If not use
- readv to avoid two transactions */
- if (intel_scu_ipc_ioread8(0x00, &r) < 0) {
- dev_dbg(lnw->dev, "Failed to read PMIC register 0xD2");
- return -EBUSY;
- }
- if ((r & 0x03) != 0x02) {
- dev_dbg(lnw->dev, "not NEC PMIC attached\n");
- return -EBUSY;
- }
-
- if (intel_scu_ipc_ioread8(0x20, &r) < 0) {
- dev_dbg(lnw->dev, "Failed to read PMIC register 0xD2");
- return -EBUSY;
- }
-
- if ((r & 0x20) == 0) {
- dev_dbg(lnw->dev, "no battery attached\n");
- return -EBUSY;
- }
+ if (enabled)
+ sub_id = 0x8; /* Turn on the VBus */
+ else
+ sub_id = 0x9; /* Turn off the VBus */
- /* Workaround for battery attachment issue */
- if (r == 0x34) {
- dev_dbg(lnw->dev, "no battery attached on SH\n");
+ if (intel_scu_ipc_simple_command(0xef, sub_id)) {
+ dev_dbg(lnw->dev, "Failed to set Vbus via IPC commands\n");
return -EBUSY;
}
- dev_dbg(lnw->dev, "battery attached. 2 reg = %x\n", r);
-
- /* workaround: FW detect writing 0x20/0xc0 to d4 event.
- * this is only for NEC PMIC.
- */
-
- if (intel_scu_ipc_iowrite8(0xD4, enabled ? 0x20 : 0xC0))
- dev_dbg(lnw->dev, "Failed to write PMIC.\n");
-
dev_dbg(lnw->dev, "%s --->\n", __func__);
return 0;
@@ -394,14 +368,14 @@ static void langwell_otg_phy_low_power(int on)
dev_dbg(lnw->dev, "%s <--- done\n", __func__);
}
-/* After drv vbus, add 2 ms delay to set PHCD */
+/* After drv vbus, add 5 ms delay to set PHCD */
static void langwell_otg_phy_low_power_wait(int on)
{
struct langwell_otg *lnw = the_transceiver;
- dev_dbg(lnw->dev, "add 2ms delay before programing PHCD\n");
+ dev_dbg(lnw->dev, "add 5ms delay before programming PHCD\n");
- mdelay(2);
+ mdelay(5);
langwell_otg_phy_low_power(on);
}
diff --git a/drivers/usb/otg/msm72k_otg.c b/drivers/usb/otg/msm_otg.c
index 1cd52edcd0c..296598628b8 100644
--- a/drivers/usb/otg/msm72k_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -253,6 +253,9 @@ static int msm_otg_reset(struct otg_transceiver *otg)
}
#define PHY_SUSPEND_TIMEOUT_USEC (500 * 1000)
+#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
+
+#ifdef CONFIG_PM_SLEEP
static int msm_otg_suspend(struct msm_otg *motg)
{
struct otg_transceiver *otg = &motg->otg;
@@ -334,7 +337,6 @@ static int msm_otg_suspend(struct msm_otg *motg)
return 0;
}
-#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
static int msm_otg_resume(struct msm_otg *motg)
{
struct otg_transceiver *otg = &motg->otg;
@@ -399,6 +401,7 @@ skip_phy_resume:
return 0;
}
+#endif
static void msm_otg_start_host(struct otg_transceiver *otg, int on)
{
@@ -720,7 +723,8 @@ static int msm_otg_mode_open(struct inode *inode, struct file *file)
static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
- struct msm_otg *motg = file->private_data;
+ struct seq_file *s = file->private_data;
+ struct msm_otg *motg = s->private;
char buf[16];
struct otg_transceiver *otg = &motg->otg;
int status = count;
@@ -972,7 +976,7 @@ static int __devexit msm_otg_remove(struct platform_device *pdev)
msm_otg_debugfs_cleanup();
cancel_work_sync(&motg->sm_work);
- msm_otg_resume(motg);
+ pm_runtime_resume(&pdev->dev);
device_init_wakeup(&pdev->dev, 0);
pm_runtime_disable(&pdev->dev);
@@ -1050,13 +1054,9 @@ static int msm_otg_runtime_resume(struct device *dev)
dev_dbg(dev, "OTG runtime resume\n");
return msm_otg_resume(motg);
}
-#else
-#define msm_otg_runtime_idle NULL
-#define msm_otg_runtime_suspend NULL
-#define msm_otg_runtime_resume NULL
#endif
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int msm_otg_pm_suspend(struct device *dev)
{
struct msm_otg *motg = dev_get_drvdata(dev);
@@ -1086,25 +1086,24 @@ static int msm_otg_pm_resume(struct device *dev)
return 0;
}
-#else
-#define msm_otg_pm_suspend NULL
-#define msm_otg_pm_resume NULL
#endif
+#ifdef CONFIG_PM
static const struct dev_pm_ops msm_otg_dev_pm_ops = {
- .runtime_suspend = msm_otg_runtime_suspend,
- .runtime_resume = msm_otg_runtime_resume,
- .runtime_idle = msm_otg_runtime_idle,
- .suspend = msm_otg_pm_suspend,
- .resume = msm_otg_pm_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume)
+ SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume,
+ msm_otg_runtime_idle)
};
+#endif
static struct platform_driver msm_otg_driver = {
.remove = __devexit_p(msm_otg_remove),
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
.pm = &msm_otg_dev_pm_ops,
+#endif
},
};
diff --git a/drivers/usb/otg/nop-usb-xceiv.c b/drivers/usb/otg/nop-usb-xceiv.c
index 8acf165fe13..c1e36004643 100644
--- a/drivers/usb/otg/nop-usb-xceiv.c
+++ b/drivers/usb/otg/nop-usb-xceiv.c
@@ -132,7 +132,7 @@ static int __devinit nop_usb_xceiv_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, nop);
- BLOCKING_INIT_NOTIFIER_HEAD(&nop->otg.notifier);
+ ATOMIC_INIT_NOTIFIER_HEAD(&nop->otg.notifier);
return 0;
exit:
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 6ca505f333e..e01b073cc48 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -275,6 +275,8 @@ static enum usb_xceiv_events twl4030_usb_linkstat(struct twl4030_usb *twl)
dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
status, status, linkstat);
+ twl->otg.last_event = linkstat;
+
/* REVISIT this assumes host and peripheral controllers
* are registered, and that both are active...
*/
@@ -512,7 +514,7 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
else
twl4030_phy_resume(twl);
- blocking_notifier_call_chain(&twl->otg.notifier, status,
+ atomic_notifier_call_chain(&twl->otg.notifier, status,
twl->otg.gadget);
}
sysfs_notify(&twl->dev->kobj, NULL, "vbus");
@@ -534,7 +536,7 @@ static void twl4030_usb_phy_init(struct twl4030_usb *twl)
twl->asleep = 0;
}
- blocking_notifier_call_chain(&twl->otg.notifier, status,
+ atomic_notifier_call_chain(&twl->otg.notifier, status,
twl->otg.gadget);
}
sysfs_notify(&twl->dev->kobj, NULL, "vbus");
@@ -623,7 +625,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
if (device_create_file(&pdev->dev, &dev_attr_vbus))
dev_warn(&pdev->dev, "could not create sysfs file\n");
- BLOCKING_INIT_NOTIFIER_HEAD(&twl->otg.notifier);
+ ATOMIC_INIT_NOTIFIER_HEAD(&twl->otg.notifier);
/* Our job is to use irqs and status from the power module
* to keep the transceiver disabled when nothing's connected.
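
ab8500-usb, nop-usb-xceiv, twl4030-usb and twl6030-usb all switch the transceiver's event notifier from a blocking chain to an atomic one, so link-status events can be raised directly from interrupt handlers without sleeping. A minimal sketch of the atomic-notifier pattern, with hypothetical names:

	#include <linux/notifier.h>

	static ATOMIC_NOTIFIER_HEAD(demo_chain);

	/* safe to call from hard-IRQ context: atomic chains never sleep */
	static void demo_report_event(unsigned long event, void *data)
	{
		atomic_notifier_call_chain(&demo_chain, event, data);
	}

	static int demo_event_cb(struct notifier_block *nb, unsigned long event,
				 void *data)
	{
		/* must not sleep here either */
		return NOTIFY_OK;
	}

	static struct notifier_block demo_nb = {
		.notifier_call = demo_event_cb,
	};

	static int demo_register(void)
	{
		return atomic_notifier_chain_register(&demo_chain, &demo_nb);
	}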
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c
index 28f77010364..8a91b4b832a 100644
--- a/drivers/usb/otg/twl6030-usb.c
+++ b/drivers/usb/otg/twl6030-usb.c
@@ -149,7 +149,6 @@ static int twl6030_set_phy_clk(struct otg_transceiver *x, int on)
static int twl6030_phy_init(struct otg_transceiver *x)
{
- u8 hw_state;
struct twl6030_usb *twl;
struct device *dev;
struct twl4030_usb_data *pdata;
@@ -158,11 +157,7 @@ static int twl6030_phy_init(struct otg_transceiver *x)
dev = twl->dev;
pdata = dev->platform_data;
- regulator_enable(twl->usb3v3);
-
- hw_state = twl6030_readb(twl, TWL6030_MODULE_ID0, STS_HW_CONDITIONS);
-
- if (hw_state & STS_USB_ID)
+ if (twl->linkstat == USB_EVENT_ID)
pdata->phy_power(twl->dev, 1, 1);
else
pdata->phy_power(twl->dev, 0, 1);
@@ -180,7 +175,17 @@ static void twl6030_phy_shutdown(struct otg_transceiver *x)
dev = twl->dev;
pdata = dev->platform_data;
pdata->phy_power(twl->dev, 0, 0);
- regulator_disable(twl->usb3v3);
+}
+
+static int twl6030_phy_suspend(struct otg_transceiver *x, int suspend)
+{
+ struct twl6030_usb *twl = xceiv_to_twl(x);
+ struct device *dev = twl->dev;
+ struct twl4030_usb_data *pdata = dev->platform_data;
+
+ pdata->phy_suspend(dev, suspend);
+
+ return 0;
}
static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
@@ -199,16 +204,6 @@ static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
if (IS_ERR(twl->usb3v3))
return -ENODEV;
- regulator_enable(twl->usb3v3);
-
- /* Program the VUSB_CFG_TRANS for ACTIVE state. */
- twl6030_writeb(twl, TWL_MODULE_PM_RECEIVER, 0x3F,
- VUSB_CFG_TRANS);
-
- /* Program the VUSB_CFG_STATE register to ON on all groups. */
- twl6030_writeb(twl, TWL_MODULE_PM_RECEIVER, 0xE1,
- VUSB_CFG_STATE);
-
/* Program the USB_VBUS_CTRL_SET and set VBUS_ACT_COMP bit */
twl6030_writeb(twl, TWL_MODULE_USB, 0x4, USB_VBUS_CTRL_SET);
@@ -261,16 +256,25 @@ static irqreturn_t twl6030_usb_irq(int irq, void *_twl)
CONTROLLER_STAT1);
if (!(hw_state & STS_USB_ID)) {
if (vbus_state & VBUS_DET) {
+ regulator_enable(twl->usb3v3);
+ twl->asleep = 1;
status = USB_EVENT_VBUS;
twl->otg.default_a = false;
twl->otg.state = OTG_STATE_B_IDLE;
+ twl->linkstat = status;
+ twl->otg.last_event = status;
+ atomic_notifier_call_chain(&twl->otg.notifier,
+ status, twl->otg.gadget);
} else {
status = USB_EVENT_NONE;
- }
- if (status >= 0) {
twl->linkstat = status;
- blocking_notifier_call_chain(&twl->otg.notifier,
+ twl->otg.last_event = status;
+ atomic_notifier_call_chain(&twl->otg.notifier,
status, twl->otg.gadget);
+ if (twl->asleep) {
+ regulator_disable(twl->usb3v3);
+ twl->asleep = 0;
+ }
}
}
sysfs_notify(&twl->dev->kobj, NULL, "vbus");
@@ -288,13 +292,17 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
if (hw_state & STS_USB_ID) {
+ regulator_enable(twl->usb3v3);
+ twl->asleep = 1;
twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR, 0x1);
twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
0x10);
status = USB_EVENT_ID;
twl->otg.default_a = true;
twl->otg.state = OTG_STATE_A_IDLE;
- blocking_notifier_call_chain(&twl->otg.notifier, status,
+ twl->linkstat = status;
+ twl->otg.last_event = status;
+ atomic_notifier_call_chain(&twl->otg.notifier, status,
twl->otg.gadget);
} else {
twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR,
@@ -303,7 +311,6 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
0x1);
}
twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_LATCH_CLR, status);
- twl->linkstat = status;
return IRQ_HANDLED;
}
@@ -395,6 +402,7 @@ static int __devinit twl6030_usb_probe(struct platform_device *pdev)
twl->otg.set_vbus = twl6030_set_vbus;
twl->otg.init = twl6030_phy_init;
twl->otg.shutdown = twl6030_phy_shutdown;
+ twl->otg.set_suspend = twl6030_phy_suspend;
/* init spinlock for workqueue */
spin_lock_init(&twl->lock);
@@ -411,7 +419,7 @@ static int __devinit twl6030_usb_probe(struct platform_device *pdev)
if (device_create_file(&pdev->dev, &dev_attr_vbus))
dev_warn(&pdev->dev, "could not create sysfs file\n");
- BLOCKING_INIT_NOTIFIER_HEAD(&twl->otg.notifier);
+ ATOMIC_INIT_NOTIFIER_HEAD(&twl->otg.notifier);
twl->irq_enabled = true;
status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
@@ -437,7 +445,9 @@ static int __devinit twl6030_usb_probe(struct platform_device *pdev)
return status;
}
+ twl->asleep = 0;
pdata->phy_init(dev);
+ twl6030_phy_suspend(&twl->otg, 0);
twl6030_enable_irq(&twl->otg);
dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
diff --git a/drivers/usb/otg/ulpi_viewport.c b/drivers/usb/otg/ulpi_viewport.c
new file mode 100644
index 00000000000..e9a37f90994
--- /dev/null
+++ b/drivers/usb/otg/ulpi_viewport.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/io.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
+
+#define ULPI_VIEW_WAKEUP (1 << 31)
+#define ULPI_VIEW_RUN (1 << 30)
+#define ULPI_VIEW_WRITE (1 << 29)
+#define ULPI_VIEW_READ (0 << 29)
+#define ULPI_VIEW_ADDR(x) (((x) & 0xff) << 16)
+#define ULPI_VIEW_DATA_READ(x) (((x) >> 8) & 0xff)
+#define ULPI_VIEW_DATA_WRITE(x) ((x) & 0xff)
+
+static int ulpi_viewport_wait(void __iomem *view, u32 mask)
+{
+ unsigned long usec = 2000;
+
+ while (usec--) {
+ if (!(readl(view) & mask))
+ return 0;
+
+ udelay(1);
+ };
+
+ return -ETIMEDOUT;
+}
+
+static int ulpi_viewport_read(struct otg_transceiver *otg, u32 reg)
+{
+ int ret;
+ void __iomem *view = otg->io_priv;
+
+ writel(ULPI_VIEW_WAKEUP | ULPI_VIEW_WRITE, view);
+ ret = ulpi_viewport_wait(view, ULPI_VIEW_WAKEUP);
+ if (ret)
+ return ret;
+
+ writel(ULPI_VIEW_RUN | ULPI_VIEW_READ | ULPI_VIEW_ADDR(reg), view);
+ ret = ulpi_viewport_wait(view, ULPI_VIEW_RUN);
+ if (ret)
+ return ret;
+
+ return ULPI_VIEW_DATA_READ(readl(view));
+}
+
+static int ulpi_viewport_write(struct otg_transceiver *otg, u32 val, u32 reg)
+{
+ int ret;
+ void __iomem *view = otg->io_priv;
+
+ writel(ULPI_VIEW_WAKEUP | ULPI_VIEW_WRITE, view);
+ ret = ulpi_viewport_wait(view, ULPI_VIEW_WAKEUP);
+ if (ret)
+ return ret;
+
+ writel(ULPI_VIEW_RUN | ULPI_VIEW_WRITE | ULPI_VIEW_DATA_WRITE(val) |
+ ULPI_VIEW_ADDR(reg), view);
+
+ return ulpi_viewport_wait(view, ULPI_VIEW_RUN);
+}
+
+struct otg_io_access_ops ulpi_viewport_access_ops = {
+ .read = ulpi_viewport_read,
+ .write = ulpi_viewport_write,
+};
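
The new ulpi_viewport.c implements the viewport protocol used by Chipidea/ARC-style controllers: wake the PHY, then start a read or write run and poll the RUN bit until the byte has been shuttled across the ULPI interface. A controller glue driver is expected to point its transceiver at these shared ops and at its viewport register, roughly as in this sketch (the register offset is hypothetical and SoC-specific; otg_io_read() and the ULPI register names come from the generic OTG/ULPI headers):

	#include <linux/usb/otg.h>
	#include <linux/usb/ulpi.h>

	/* sketch: hand the memory-mapped viewport register to the shared ops */
	static void demo_wire_ulpi_viewport(struct otg_transceiver *otg,
					    void __iomem *regs)
	{
		otg->io_ops  = &ulpi_viewport_access_ops;
		otg->io_priv = regs + 0x170;	/* hypothetical viewport offset */
	}

	static int demo_read_phy_vendor(struct otg_transceiver *otg)
	{
		/* otg_io_read() dispatches to ulpi_viewport_read() above */
		return otg_io_read(otg, ULPI_VENDOR_ID_LOW);
	}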
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 31bba6ddfb2..68c16419a8d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -725,6 +725,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
@@ -978,7 +980,7 @@ static __u32 ftdi_2232h_baud_base_to_divisor(int baud, int base)
int divisor3;
/* hi-speed baud rate is 10-bit sampling instead of 16-bit */
- divisor3 = (base / 10 / baud) * 8;
+ divisor3 = base * 8 / (baud * 10);
divisor = divisor3 >> 3;
divisor |= (__u32)divfrac[divisor3 & 0x7] << 14;
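
The ftdi_sio change reorders the hi-speed divisor arithmetic so the fractional part is not truncated away: divisor3 is meant to hold eighths of the divisor, and the old (base / 10 / baud) * 8 discards the remainder twice before multiplying. A small worked example (120 MHz is the usual H-series base clock; the baud rate is chosen only for illustration):

	#include <stdio.h>

	int main(void)
	{
		int base = 120000000, baud = 7000000;

		int old3 = (base / 10 / baud) * 8;	/* 12000000/7000000 = 1 -> 8  */
		int new3 = base * 8 / (baud * 10);	/* 960000000/70000000  -> 13 */

		/* exact divisor is 1.714, i.e. 13.7 eighths: the old code rounds
		 * it all the way down to 1.0, the new code keeps 13/8 = 1.625 */
		printf("old divisor3=%d new divisor3=%d\n", old3, new3);
		return 0;
	}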
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4e873ce579b..efffc23723b 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -733,6 +733,7 @@
/* Olimex */
#define OLIMEX_VID 0x15BA
#define OLIMEX_ARM_USB_OCD_PID 0x0003
+#define OLIMEX_ARM_USB_OCD_H_PID 0x002b
/*
* Telldus Technologies
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 3b246d93cf2..76e3e502c23 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2343,7 +2343,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port,
usb_get_serial_data(edge_port->port->serial);
int status = 0;
struct urb *urb;
- int timeout;
usb_serial_debug_data(debug, &edge_port->port->dev,
__func__, length, buffer);
@@ -2376,8 +2375,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port,
return status;
}
- /* wait for command to finish */
- timeout = COMMAND_TIMEOUT;
#if 0
wait_event(&edge_port->wait_command, !edge_port->commandPending);
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 0791778a66f..67f41b52657 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -2121,16 +2121,16 @@ static int keyspan_usa49_send_setup(struct usb_serial *serial,
/* Work out which port within the device is being setup */
device_port = port->number - port->serial->minor;
- dbg("%s - endpoint %d port %d (%d)",
- __func__, usb_pipeendpoint(this_urb->pipe),
- port->number, device_port);
-
- /* Make sure we have an urb then send the message */
+ /* Make sure we have an urb then send the message */
if (this_urb == NULL) {
dbg("%s - oops no urb for port %d.", __func__, port->number);
return -1;
}
+ dbg("%s - endpoint %d port %d (%d)",
+ __func__, usb_pipeendpoint(this_urb->pipe),
+ port->number, device_port);
+
/* Save reset port val for resend.
Don't overwrite resend for open/close condition. */
if ((reset_port + 1) > p_priv->resend_cont)
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 554a8693a46..7b690f3282a 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -173,7 +173,8 @@ static void keyspan_pda_wakeup_write(struct work_struct *work)
container_of(work, struct keyspan_pda_private, wakeup_work);
struct usb_serial_port *port = priv->port;
struct tty_struct *tty = tty_port_tty_get(&port->port);
- tty_wakeup(tty);
+ if (tty)
+ tty_wakeup(tty);
tty_kref_put(tty);
}
@@ -206,7 +207,7 @@ static void keyspan_pda_request_unthrottle(struct work_struct *work)
static void keyspan_pda_rx_interrupt(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- struct tty_struct *tty = tty_port_tty_get(&port->port);
+ struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int retval;
int status = urb->status;
@@ -223,7 +224,7 @@ static void keyspan_pda_rx_interrupt(struct urb *urb)
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d",
__func__, status);
- goto out;
+ return;
default:
dbg("%s - nonzero urb status received: %d",
__func__, status);
@@ -233,12 +234,14 @@ static void keyspan_pda_rx_interrupt(struct urb *urb)
/* see if the message is data or a status interrupt */
switch (data[0]) {
case 0:
- /* rest of message is rx data */
- if (urb->actual_length) {
+ tty = tty_port_tty_get(&port->port);
+ /* rest of message is rx data */
+ if (tty && urb->actual_length) {
tty_insert_flip_string(tty, data + 1,
urb->actual_length - 1);
tty_flip_buffer_push(tty);
}
+ tty_kref_put(tty);
break;
case 1:
/* status interrupt */
@@ -265,8 +268,6 @@ exit:
dev_err(&port->dev,
"%s - usb_submit_urb failed with result %d",
__func__, retval);
-out:
- tty_kref_put(tty);
}
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 2849f8c3201..1e225aacf46 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -78,6 +78,8 @@
#include <asm/unaligned.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#include <linux/serial.h>
+#include <linux/ioctl.h>
#include "mct_u232.h"
/*
@@ -104,6 +106,10 @@ static void mct_u232_break_ctl(struct tty_struct *tty, int break_state);
static int mct_u232_tiocmget(struct tty_struct *tty, struct file *file);
static int mct_u232_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
+static int mct_u232_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg);
+static int mct_u232_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount);
static void mct_u232_throttle(struct tty_struct *tty);
static void mct_u232_unthrottle(struct tty_struct *tty);
@@ -150,9 +156,10 @@ static struct usb_serial_driver mct_u232_device = {
.tiocmset = mct_u232_tiocmset,
.attach = mct_u232_startup,
.release = mct_u232_release,
+ .ioctl = mct_u232_ioctl,
+ .get_icount = mct_u232_get_icount,
};
-
struct mct_u232_private {
spinlock_t lock;
unsigned int control_state; /* Modem Line Setting (TIOCM) */
@@ -160,6 +167,9 @@ struct mct_u232_private {
unsigned char last_lsr; /* Line Status Register */
unsigned char last_msr; /* Modem Status Register */
unsigned int rx_flags; /* Throttling flags */
+ struct async_icount icount;
+ wait_queue_head_t msr_wait; /* for handling sleeping while waiting
+ for msr change to happen */
};
#define THROTTLED 0x01
@@ -386,6 +396,20 @@ static int mct_u232_get_modem_stat(struct usb_serial *serial,
return rc;
} /* mct_u232_get_modem_stat */
+static void mct_u232_msr_to_icount(struct async_icount *icount,
+ unsigned char msr)
+{
+ /* Translate Control Line states */
+ if (msr & MCT_U232_MSR_DDSR)
+ icount->dsr++;
+ if (msr & MCT_U232_MSR_DCTS)
+ icount->cts++;
+ if (msr & MCT_U232_MSR_DRI)
+ icount->rng++;
+ if (msr & MCT_U232_MSR_DCD)
+ icount->dcd++;
+} /* mct_u232_msr_to_icount */
+
static void mct_u232_msr_to_state(unsigned int *control_state,
unsigned char msr)
{
@@ -422,6 +446,7 @@ static int mct_u232_startup(struct usb_serial *serial)
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->lock);
+ init_waitqueue_head(&priv->msr_wait);
usb_set_serial_port_data(serial->port[0], priv);
init_waitqueue_head(&serial->port[0]->write_wait);
@@ -621,6 +646,8 @@ static void mct_u232_read_int_callback(struct urb *urb)
/* Record Control Line states */
mct_u232_msr_to_state(&priv->control_state, priv->last_msr);
+ mct_u232_msr_to_icount(&priv->icount, priv->last_msr);
+
#if 0
/* Not yet handled. See belkin_sa.c for further information */
/* Now to report any errors */
@@ -647,6 +674,7 @@ static void mct_u232_read_int_callback(struct urb *urb)
tty_kref_put(tty);
}
#endif
+ wake_up_interruptible(&priv->msr_wait);
spin_unlock_irqrestore(&priv->lock, flags);
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
@@ -826,7 +854,6 @@ static void mct_u232_throttle(struct tty_struct *tty)
}
}
-
static void mct_u232_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
@@ -847,6 +874,82 @@ static void mct_u232_unthrottle(struct tty_struct *tty)
}
}
+static int mct_u232_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ DEFINE_WAIT(wait);
+ struct usb_serial_port *port = tty->driver_data;
+ struct mct_u232_private *mct_u232_port = usb_get_serial_port_data(port);
+ struct async_icount cnow, cprev;
+ unsigned long flags;
+
+ dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd);
+
+ switch (cmd) {
+
+ case TIOCMIWAIT:
+
+ dbg("%s (%d) TIOCMIWAIT", __func__, port->number);
+
+ spin_lock_irqsave(&mct_u232_port->lock, flags);
+ cprev = mct_u232_port->icount;
+ spin_unlock_irqrestore(&mct_u232_port->lock, flags);
+ for ( ; ; ) {
+ prepare_to_wait(&mct_u232_port->msr_wait,
+ &wait, TASK_INTERRUPTIBLE);
+ schedule();
+ finish_wait(&mct_u232_port->msr_wait, &wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ spin_lock_irqsave(&mct_u232_port->lock, flags);
+ cnow = mct_u232_port->icount;
+ spin_unlock_irqrestore(&mct_u232_port->lock, flags);
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
+ return -EIO; /* no change => error */
+ if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
+ ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
+ ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
+ return 0;
+ }
+ cprev = cnow;
+ }
+
+ }
+ return -ENOIOCTLCMD;
+}
+
+static int mct_u232_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct mct_u232_private *mct_u232_port = usb_get_serial_port_data(port);
+ struct async_icount *ic = &mct_u232_port->icount;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mct_u232_port->lock, flags);
+
+ icount->cts = ic->cts;
+ icount->dsr = ic->dsr;
+ icount->rng = ic->rng;
+ icount->dcd = ic->dcd;
+ icount->rx = ic->rx;
+ icount->tx = ic->tx;
+ icount->frame = ic->frame;
+ icount->overrun = ic->overrun;
+ icount->parity = ic->parity;
+ icount->brk = ic->brk;
+ icount->buf_overrun = ic->buf_overrun;
+
+ spin_unlock_irqrestore(&mct_u232_port->lock, flags);
+
+ dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d",
+ __func__, port->number, icount->rx, icount->tx);
+ return 0;
+}
+
static int __init mct_u232_init(void)
{
int retval;
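
The ioctl and get_icount hooks added above are reached through the standard TIOCMIWAIT and TIOCGICOUNT serial ioctls. A minimal userspace sketch of how they are exercised follows; the device node is hypothetical and header availability may vary between libcs.

/* Minimal userspace sketch: block until CTS toggles, then read the
 * counters maintained by mct_u232_msr_to_icount(). Illustrative only;
 * /dev/ttyUSB0 is a hypothetical node and error handling is trimmed. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>	/* struct serial_icounter_struct */

int main(void)
{
	struct serial_icounter_struct ic;
	int fd = open("/dev/ttyUSB0", O_RDONLY | O_NOCTTY);

	if (fd < 0)
		return 1;

	/* The driver wakes us from msr_wait when a modem line changes */
	if (ioctl(fd, TIOCMIWAIT, TIOCM_CTS) == 0 &&
	    ioctl(fd, TIOCGICOUNT, &ic) == 0)
		printf("cts=%d dsr=%d rng=%d dcd=%d\n",
		       ic.cts, ic.dsr, ic.rng, ic.dcd);

	close(fd);
	return 0;
}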
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 7d3bc9a3e2b..ae506f4ee29 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -2052,7 +2052,7 @@ static int mos7720_startup(struct usb_serial *serial)
struct usb_device *dev;
int i;
char data;
- u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ u16 product;
int ret_val;
dbg("%s: Entering ..........", __func__);
@@ -2062,6 +2062,7 @@ static int mos7720_startup(struct usb_serial *serial)
return -ENODEV;
}
+ product = le16_to_cpu(serial->dev->descriptor.idProduct);
dev = serial->dev;
/*
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index eda1f9266c4..ce82396fc4e 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -1,6 +1,7 @@
/*
* Opticon USB barcode to serial driver
*
+ * Copyright (C) 2011 Martin Jansen <martin.jansen@opticon.com>
* Copyright (C) 2008 - 2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (C) 2008 - 2009 Novell Inc.
*
@@ -21,6 +22,16 @@
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
+#define CONTROL_RTS 0x02
+#define RESEND_CTS_STATE 0x03
+
+/* max number of write urbs in flight */
+#define URB_UPPER_LIMIT 8
+
+/* This driver works for the Opticon 1D barcode reader
* examples of 1D barcode types are EAN, UPC, Code39, IATA, etc. */
+#define DRIVER_DESC "Opticon USB barcode to serial driver (1D)"
+
static int debug;
static const struct usb_device_id id_table[] = {
@@ -42,13 +53,13 @@ struct opticon_private {
bool throttled;
bool actually_throttled;
bool rts;
+ bool cts;
int outstanding_urbs;
};
-/* max number of write urbs in flight */
-#define URB_UPPER_LIMIT 4
-static void opticon_bulk_callback(struct urb *urb)
+
+static void opticon_read_bulk_callback(struct urb *urb)
{
struct opticon_private *priv = urb->context;
unsigned char *data = urb->transfer_buffer;
@@ -57,6 +68,7 @@ static void opticon_bulk_callback(struct urb *urb)
struct tty_struct *tty;
int result;
int data_length;
+ unsigned long flags;
dbg("%s - port %d", __func__, port->number);
@@ -87,10 +99,10 @@ static void opticon_bulk_callback(struct urb *urb)
* Data from the device comes with a 2 byte header:
*
* <0x00><0x00>data...
- * This is real data to be sent to the tty layer
+ * This is real data to be sent to the tty layer
* <0x00><0x01)level
- * This is a RTS level change, the third byte is the RTS
- * value (0 for low, 1 for high).
+ * This is a CTS level change, the third byte is the CTS
+ * value (0 for low, 1 for high).
*/
if ((data[0] == 0x00) && (data[1] == 0x00)) {
/* real data, send it to the tty layer */
@@ -103,10 +115,13 @@ static void opticon_bulk_callback(struct urb *urb)
}
} else {
if ((data[0] == 0x00) && (data[1] == 0x01)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ /* CTS status information packet */
if (data[2] == 0x00)
- priv->rts = false;
+ priv->cts = false;
else
- priv->rts = true;
+ priv->cts = true;
+ spin_unlock_irqrestore(&priv->lock, flags);
} else {
dev_dbg(&priv->udev->dev,
"Unknown data packet received from the device:"
@@ -129,7 +144,7 @@ exit:
usb_rcvbulkpipe(priv->udev,
priv->bulk_address),
priv->bulk_in_buffer, priv->buffer_size,
- opticon_bulk_callback, priv);
+ opticon_read_bulk_callback, priv);
result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev,
@@ -140,6 +155,24 @@ exit:
spin_unlock(&priv->lock);
}
+static int send_control_msg(struct usb_serial_port *port, u8 requesttype,
+ u8 val)
+{
+ struct usb_serial *serial = port->serial;
+ int retval;
+ u8 buffer[2];
+
+ buffer[0] = val;
+ /* Send the message to the vendor control endpoint
+ * of the connected device */
+ retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+ requesttype,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ 0, 0, buffer, 1, 0);
+
+ return retval;
+}
+
static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct opticon_private *priv = usb_get_serial_data(port->serial);
@@ -152,19 +185,30 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port)
priv->throttled = false;
priv->actually_throttled = false;
priv->port = port;
+ priv->rts = false;
spin_unlock_irqrestore(&priv->lock, flags);
- /* Start reading from the device */
+ /* Clear RTS line */
+ send_control_msg(port, CONTROL_RTS, 0);
+
+ /* Setup the read URB and start reading from the device */
usb_fill_bulk_urb(priv->bulk_read_urb, priv->udev,
usb_rcvbulkpipe(priv->udev,
priv->bulk_address),
priv->bulk_in_buffer, priv->buffer_size,
- opticon_bulk_callback, priv);
+ opticon_read_bulk_callback, priv);
+
+ /* clear the halt status of the endpoint */
+ usb_clear_halt(priv->udev, priv->bulk_read_urb->pipe);
+
result = usb_submit_urb(priv->bulk_read_urb, GFP_KERNEL);
if (result)
dev_err(&port->dev,
"%s - failed resubmitting read urb, error %d\n",
__func__, result);
+ /* Request the CTS line state; during open the current
+ * CTS state can sometimes be missed. */
+ send_control_msg(port, RESEND_CTS_STATE, 1);
return result;
}
@@ -178,7 +222,7 @@ static void opticon_close(struct usb_serial_port *port)
usb_kill_urb(priv->bulk_read_urb);
}
-static void opticon_write_bulk_callback(struct urb *urb)
+static void opticon_write_control_callback(struct urb *urb)
{
struct opticon_private *priv = urb->context;
int status = urb->status;
@@ -210,6 +254,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
unsigned char *buffer;
unsigned long flags;
int status;
+ struct usb_ctrlrequest *dr;
dbg("%s - port %d", __func__, port->number);
@@ -226,6 +271,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
if (!buffer) {
dev_err(&port->dev, "out of memory\n");
count = -ENOMEM;
+
goto error_no_buffer;
}
@@ -240,35 +286,28 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
usb_serial_debug_data(debug, &port->dev, __func__, count, buffer);
- if (port->bulk_out_endpointAddress) {
- usb_fill_bulk_urb(urb, serial->dev,
- usb_sndbulkpipe(serial->dev,
- port->bulk_out_endpointAddress),
- buffer, count, opticon_write_bulk_callback, priv);
- } else {
- struct usb_ctrlrequest *dr;
-
- dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
- if (!dr)
- return -ENOMEM;
-
- dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT;
- dr->bRequest = 0x01;
- dr->wValue = 0;
- dr->wIndex = 0;
- dr->wLength = cpu_to_le16(count);
-
- usb_fill_control_urb(urb, serial->dev,
- usb_sndctrlpipe(serial->dev, 0),
- (unsigned char *)dr, buffer, count,
- opticon_write_bulk_callback, priv);
- }
+ /* The connected devices do not have a bulk write endpoint;
+ * the control endpoint is used to transmit data to the barcode device */
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ if (!dr)
+ return -ENOMEM;
+
+ dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT;
+ dr->bRequest = 0x01;
+ dr->wValue = 0;
+ dr->wIndex = 0;
+ dr->wLength = cpu_to_le16(count);
+
+ usb_fill_control_urb(urb, serial->dev,
+ usb_sndctrlpipe(serial->dev, 0),
+ (unsigned char *)dr, buffer, count,
+ opticon_write_control_callback, priv);
/* send it down the pipe */
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
dev_err(&port->dev,
- "%s - usb_submit_urb(write bulk) failed with status = %d\n",
+ "%s - usb_submit_urb(write endpoint) failed status = %d\n",
__func__, status);
count = status;
goto error;
@@ -360,16 +399,49 @@ static int opticon_tiocmget(struct tty_struct *tty, struct file *file)
int result = 0;
dbg("%s - port %d", __func__, port->number);
+ if (!usb_get_intfdata(port->serial->interface))
+ return -ENODEV;
spin_lock_irqsave(&priv->lock, flags);
if (priv->rts)
- result = TIOCM_RTS;
+ result |= TIOCM_RTS;
+ if (priv->cts)
+ result |= TIOCM_CTS;
spin_unlock_irqrestore(&priv->lock, flags);
dbg("%s - %x", __func__, result);
return result;
}
+static int opticon_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct opticon_private *priv = usb_get_serial_data(port->serial);
+ unsigned long flags;
+ bool rts;
+ bool changed = false;
+
+ if (!usb_get_intfdata(port->serial->interface))
+ return -ENODEV;
+ /* We only support RTS so we only handle that */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rts = priv->rts;
+ if (set & TIOCM_RTS)
+ priv->rts = true;
+ if (clear & TIOCM_RTS)
+ priv->rts = false;
+ changed = rts ^ priv->rts;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (!changed)
+ return 0;
+
+ /* Send the new RTS state to the connected device */
+ return send_control_msg(port, CONTROL_RTS, !rts);
+}
+
static int get_serial_info(struct opticon_private *priv,
struct serial_struct __user *serial)
{
@@ -431,6 +503,7 @@ static int opticon_startup(struct usb_serial *serial)
priv->serial = serial;
priv->port = serial->port[0];
priv->udev = serial->dev;
+ priv->outstanding_urbs = 0; /* Init the outstanding urbs */
/* find our bulk endpoint */
intf = serial->interface->altsetting;
@@ -456,13 +529,6 @@ static int opticon_startup(struct usb_serial *serial)
priv->bulk_address = endpoint->bEndpointAddress;
- /* set up our bulk urb */
- usb_fill_bulk_urb(priv->bulk_read_urb, priv->udev,
- usb_rcvbulkpipe(priv->udev,
- endpoint->bEndpointAddress),
- priv->bulk_in_buffer, priv->buffer_size,
- opticon_bulk_callback, priv);
-
bulk_in_found = true;
break;
}
@@ -558,6 +624,7 @@ static struct usb_serial_driver opticon_device = {
.unthrottle = opticon_unthrottle,
.ioctl = opticon_ioctl,
.tiocmget = opticon_tiocmget,
+ .tiocmset = opticon_tiocmset,
};
static int __init opticon_init(void)
@@ -581,6 +648,7 @@ static void __exit opticon_exit(void)
module_init(opticon_init);
module_exit(opticon_exit);
+MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(debug, bool, S_IRUGO | S_IWUSR);
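
To spell out the two-byte framing documented in opticon_read_bulk_callback() above, here is a free-standing parser sketch; the names are illustrative, not driver API.

/* Illustrative classifier for the Opticon bulk-in framing:
 *   <0x00><0x00> payload...   real data destined for the tty layer
 *   <0x00><0x01> level        CTS change; third byte 0 = low, 1 = high
 * Names are hypothetical and not part of the driver. */
enum opticon_pkt { OPTICON_DATA, OPTICON_CTS, OPTICON_UNKNOWN };

static enum opticon_pkt opticon_classify(const unsigned char *buf, int len,
					 int *cts_level)
{
	if (len < 2 || buf[0] != 0x00)
		return OPTICON_UNKNOWN;
	if (buf[1] == 0x00)
		return OPTICON_DATA;		/* payload starts at buf[2] */
	if (buf[1] == 0x01 && len >= 3) {
		*cts_level = buf[2] ? 1 : 0;
		return OPTICON_CTS;
	}
	return OPTICON_UNKNOWN;
}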
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 0457813eebe..5b88b6836a8 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -376,7 +376,10 @@ static int sierra_send_setup(struct usb_serial_port *port)
if (!do_send)
return 0;
- usb_autopm_get_interface(serial->interface);
+ retval = usb_autopm_get_interface(serial->interface);
+ if (retval < 0)
+ return retval;
+
retval = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
0x22, 0x21, val, interface, NULL, 0, USB_CTRL_SET_TIMEOUT);
usb_autopm_put_interface(serial->interface);
@@ -811,8 +814,12 @@ static void sierra_close(struct usb_serial_port *port)
mutex_lock(&serial->disc_mutex);
if (!serial->disconnected) {
serial->interface->needs_remote_wakeup = 0;
- usb_autopm_get_interface(serial->interface);
- sierra_send_setup(port);
+ /* odd error handling due to pm counters */
+ if (!usb_autopm_get_interface(serial->interface))
+ sierra_send_setup(port);
+ else
+ usb_autopm_get_interface_no_resume(serial->interface);
+
}
mutex_unlock(&serial->disc_mutex);
spin_lock_irq(&intfdata->susp_lock);
@@ -865,7 +872,8 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
/* get rid of everything as in close */
sierra_close(port);
/* restore balance for autopm */
- usb_autopm_put_interface(serial->interface);
+ if (!serial->disconnected)
+ usb_autopm_put_interface(serial->interface);
return err;
}
sierra_send_setup(port);
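
The sierra.c hunks above (and the usb_wwan.c ones that follow) all enforce the same rule: usb_autopm_get_interface() can fail, and the PM usage count must stay balanced on every path. A simplified sketch of that pattern, with do_send() as a hypothetical placeholder for the actual work:

#include <linux/usb.h>
#include <linux/usb/serial.h>

static int do_send(struct usb_serial *serial)
{
	return 0;	/* placeholder for work done while resumed */
}

static int send_with_autopm(struct usb_serial *serial)
{
	int rv = usb_autopm_get_interface(serial->interface);

	if (rv < 0)
		return rv;	/* usage count was not taken; nothing to undo */

	rv = do_send(serial);

	usb_autopm_put_interface(serial->interface);
	return rv;
}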
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 9c014e2ecd6..eb95aecc001 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -261,7 +261,8 @@ int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
intfdata->in_flight--;
spin_unlock_irqrestore(&intfdata->susp_lock,
flags);
- continue;
+ usb_autopm_put_interface_async(port->serial->interface);
+ break;
}
}
@@ -308,11 +309,16 @@ static void usb_wwan_indat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving */
if (status != -ESHUTDOWN) {
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err && err != -EPERM)
- printk(KERN_ERR "%s: resubmit read urb failed. "
- "(%d)", __func__, err);
- else
+ if (err) {
+ if (err != -EPERM) {
+ printk(KERN_ERR "%s: resubmit read urb failed. "
+ "(%d)", __func__, err);
+ /* mark the device busy on errors too, unless we were killed */
+ usb_mark_last_busy(port->serial->dev);
+ }
+ } else {
usb_mark_last_busy(port->serial->dev);
+ }
}
}
@@ -421,6 +427,7 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
spin_lock_irq(&intfdata->susp_lock);
portdata->opened = 1;
spin_unlock_irq(&intfdata->susp_lock);
+ /* this balances a get in the generic USB serial code */
usb_autopm_put_interface(serial->interface);
return 0;
@@ -447,7 +454,8 @@ void usb_wwan_close(struct usb_serial_port *port)
usb_kill_urb(portdata->in_urbs[i]);
for (i = 0; i < N_OUT_URB; i++)
usb_kill_urb(portdata->out_urbs[i]);
- usb_autopm_get_interface(serial->interface);
+ /* balancing - important as an error cannot be handled */
+ usb_autopm_get_interface_no_resume(serial->interface);
serial->interface->needs_remote_wakeup = 0;
}
}
@@ -661,6 +669,18 @@ int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
}
EXPORT_SYMBOL(usb_wwan_suspend);
+static void unbusy_queued_urb(struct urb *urb, struct usb_wwan_port_private *portdata)
+{
+ int i;
+
+ for (i = 0; i < N_OUT_URB; i++) {
+ if (urb == portdata->out_urbs[i]) {
+ clear_bit(i, &portdata->out_busy);
+ break;
+ }
+ }
+}
+
static void play_delayed(struct usb_serial_port *port)
{
struct usb_wwan_intf_private *data;
@@ -672,8 +692,17 @@ static void play_delayed(struct usb_serial_port *port)
data = port->serial->private;
while ((urb = usb_get_from_anchor(&portdata->delayed))) {
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (!err)
+ if (!err) {
data->in_flight++;
+ } else {
+ /* we have to throw away the rest */
+ do {
+ unbusy_queued_urb(urb, portdata);
+ /* extremely dirty */
+ atomic_dec(&port->serial->interface->dev.power.usage_count);
+ } while ((urb = usb_get_from_anchor(&portdata->delayed)));
+ break;
+ }
}
}
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 49a489e0371..353aeb44da6 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -31,6 +31,16 @@ config USB_STORAGE_DEBUG
Say Y here in order to have the USB Mass Storage code generate
verbose debugging messages.
+config USB_STORAGE_REALTEK
+ tristate "Realtek Card Reader support"
+ depends on USB_STORAGE
+ help
+ Say Y here to include additional code to support the power-saving function
+ for Realtek RTS51xx USB card readers.
+
+ If this driver is compiled as a module, it will be named ums-realtek.
+
+
config USB_STORAGE_DATAFAB
tristate "Datafab Compact Flash Reader support"
depends on USB_STORAGE
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile
index fcf14cdc4a0..0d2de971bd9 100644
--- a/drivers/usb/storage/Makefile
+++ b/drivers/usb/storage/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_USB_STORAGE_ISD200) += ums-isd200.o
obj-$(CONFIG_USB_STORAGE_JUMPSHOT) += ums-jumpshot.o
obj-$(CONFIG_USB_STORAGE_KARMA) += ums-karma.o
obj-$(CONFIG_USB_STORAGE_ONETOUCH) += ums-onetouch.o
+obj-$(CONFIG_USB_STORAGE_REALTEK) += ums-realtek.o
obj-$(CONFIG_USB_STORAGE_SDDR09) += ums-sddr09.o
obj-$(CONFIG_USB_STORAGE_SDDR55) += ums-sddr55.o
obj-$(CONFIG_USB_STORAGE_USBAT) += ums-usbat.o
@@ -42,6 +43,7 @@ ums-isd200-y := isd200.o
ums-jumpshot-y := jumpshot.o
ums-karma-y := karma.o
ums-onetouch-y := onetouch.o
+ums-realtek-y := realtek_cr.o
ums-sddr09-y := sddr09.o
ums-sddr55-y := sddr55.o
ums-usbat-y := shuttle_usbat.o
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
new file mode 100644
index 00000000000..d509a4a7d74
--- /dev/null
+++ b/drivers/usb/storage/realtek_cr.c
@@ -0,0 +1,675 @@
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <linux/cdrom.h>
+
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/usb_usual.h>
+
+#include "usb.h"
+#include "transport.h"
+#include "protocol.h"
+#include "debug.h"
+
+MODULE_DESCRIPTION("Driver for Realtek USB Card Reader");
+MODULE_AUTHOR("wwang <wei_wang@realsil.com.cn>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.03");
+
+static int auto_delink_en = 1;
+module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
+
+struct rts51x_status {
+ u16 vid;
+ u16 pid;
+ u8 cur_lun;
+ u8 card_type;
+ u8 total_lun;
+ u16 fw_ver;
+ u8 phy_exist;
+ u8 multi_flag;
+ u8 multi_card;
+ u8 log_exist;
+ union {
+ u8 detailed_type1;
+ u8 detailed_type2;
+ } detailed_type;
+ u8 function[2];
+};
+
+struct rts51x_chip {
+ u16 vendor_id;
+ u16 product_id;
+ char max_lun;
+
+ struct rts51x_status *status;
+ int status_len;
+
+ u32 flag;
+};
+
+/* flag definition */
+#define FLIDX_AUTO_DELINK 0x01
+
+#define SCSI_LUN(srb) ((srb)->device->lun)
+
+/* Bit Operation */
+#define SET_BIT(data, idx) ((data) |= 1 << (idx))
+#define CLR_BIT(data, idx) ((data) &= ~(1 << (idx)))
+#define CHK_BIT(data, idx) ((data) & (1 << (idx)))
+
+#define SET_AUTO_DELINK(chip) ((chip)->flag |= FLIDX_AUTO_DELINK)
+#define CLR_AUTO_DELINK(chip) ((chip)->flag &= ~FLIDX_AUTO_DELINK)
+#define CHK_AUTO_DELINK(chip) ((chip)->flag & FLIDX_AUTO_DELINK)
+
+#define RTS51X_GET_VID(chip) ((chip)->vendor_id)
+#define RTS51X_GET_PID(chip) ((chip)->product_id)
+
+#define FW_VERSION(chip) ((chip)->status[0].fw_ver)
+#define STATUS_LEN(chip) ((chip)->status_len)
+
+/* Check card reader function */
+#define SUPPORT_DETAILED_TYPE1(chip) \
+ CHK_BIT((chip)->status[0].function[0], 1)
+#define SUPPORT_OT(chip) \
+ CHK_BIT((chip)->status[0].function[0], 2)
+#define SUPPORT_OC(chip) \
+ CHK_BIT((chip)->status[0].function[0], 3)
+#define SUPPORT_AUTO_DELINK(chip) \
+ CHK_BIT((chip)->status[0].function[0], 4)
+#define SUPPORT_SDIO(chip) \
+ CHK_BIT((chip)->status[0].function[1], 0)
+#define SUPPORT_DETAILED_TYPE2(chip) \
+ CHK_BIT((chip)->status[0].function[1], 1)
+
+#define CHECK_PID(chip, pid) (RTS51X_GET_PID(chip) == (pid))
+#define CHECK_FW_VER(chip, fw_ver) (FW_VERSION(chip) == (fw_ver))
+#define CHECK_ID(chip, pid, fw_ver) \
+ (CHECK_PID((chip), (pid)) && CHECK_FW_VER((chip), (fw_ver)))
+
+#define wait_timeout_x(task_state, msecs) \
+do { \
+ set_current_state((task_state)); \
+ schedule_timeout((msecs) * HZ / 1000); \
+} while (0)
+
+#define wait_timeout(msecs) \
+ wait_timeout_x(TASK_INTERRUPTIBLE, (msecs))
+
+static int init_realtek_cr(struct us_data *us);
+
+/*
+ * The table of devices
+ */
+#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
+ vendorName, productName, useProtocol, useTransport, \
+ initFunction, flags) \
+{\
+ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
+ .driver_info = (flags)|(USB_US_TYPE_STOR<<24)\
+}
+
+static const struct usb_device_id realtek_cr_ids[] = {
+# include "unusual_realtek.h"
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, realtek_cr_ids);
+
+#undef UNUSUAL_DEV
+
+/*
+ * The flags table
+ */
+#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
+ vendor_name, product_name, use_protocol, use_transport, \
+ init_function, Flags) \
+{ \
+ .vendorName = vendor_name, \
+ .productName = product_name, \
+ .useProtocol = use_protocol, \
+ .useTransport = use_transport, \
+ .initFunction = init_function, \
+}
+
+static struct us_unusual_dev realtek_cr_unusual_dev_list[] = {
+# include "unusual_realtek.h"
+ { } /* Terminating entry */
+};
+
+#undef UNUSUAL_DEV
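+
The two tables above rely on including unusual_realtek.h twice with different definitions of UNUSUAL_DEV(). The same double-include (X-macro) technique in isolation, as a generic illustration rather than driver code:

/* One list of entries, expanded twice with different macro bodies. */
#define DEVICE_LIST \
	DEV(0x0bda, 0x0159, "card reader A") \
	DEV(0x0bda, 0x0158, "card reader B")

/* First expansion: a table of IDs */
#define DEV(vid, pid, name) { (vid), (pid) },
static const struct { unsigned short vid, pid; } ids[] = {
	DEVICE_LIST
};
#undef DEV

/* Second expansion: a table of names, kept in the same order */
#define DEV(vid, pid, name) name,
static const char *names[] = {
	DEVICE_LIST
};
#undef DEV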
+
+static int rts51x_bulk_transport(struct us_data *us, u8 lun,
+ u8 *cmd, int cmd_len, u8 *buf, int buf_len,
+ enum dma_data_direction dir, int *act_len)
+{
+ struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+ struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
+ int result;
+ unsigned int residue;
+ unsigned int cswlen;
+ unsigned int cbwlen = US_BULK_CB_WRAP_LEN;
+
+ /* set up the command wrapper */
+ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcb->DataTransferLength = cpu_to_le32(buf_len);
+ bcb->Flags = (dir == DMA_FROM_DEVICE) ? 1 << 7 : 0;
+ bcb->Tag = ++us->tag;
+ bcb->Lun = lun;
+ bcb->Length = cmd_len;
+
+ /* copy the command payload */
+ memset(bcb->CDB, 0, sizeof(bcb->CDB));
+ memcpy(bcb->CDB, cmd, bcb->Length);
+
+ /* send it to out endpoint */
+ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
+ bcb, cbwlen, NULL);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ /* DATA STAGE */
+ /* send/receive data payload, if there is any */
+
+ if (buf && buf_len) {
+ unsigned int pipe = (dir == DMA_FROM_DEVICE) ?
+ us->recv_bulk_pipe : us->send_bulk_pipe;
+ result = usb_stor_bulk_transfer_buf(us, pipe,
+ buf, buf_len, NULL);
+ if (result == USB_STOR_XFER_ERROR)
+ return USB_STOR_TRANSPORT_ERROR;
+ }
+
+ /* get CSW for device status */
+ result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
+ bcs, US_BULK_CS_WRAP_LEN, &cswlen);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ /* check bulk status */
+ if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN)) {
+ US_DEBUGP("Signature mismatch: got %08X, expecting %08X\n",
+ le32_to_cpu(bcs->Signature),
+ US_BULK_CS_SIGN);
+ return USB_STOR_TRANSPORT_ERROR;
+ }
+
+ residue = bcs->Residue;
+ if (bcs->Tag != us->tag)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ /* try to compute the actual residue, based on how much data
+ * was really transferred and what the device tells us */
+ if (residue)
+ residue = residue < buf_len ? residue : buf_len;
+
+ if (act_len)
+ *act_len = buf_len - residue;
+
+ /* based on the status code, we report good or bad */
+ switch (bcs->Status) {
+ case US_BULK_STAT_OK:
+ /* command good -- note that data could be short */
+ return USB_STOR_TRANSPORT_GOOD;
+
+ case US_BULK_STAT_FAIL:
+ /* command failed */
+ return USB_STOR_TRANSPORT_FAILED;
+
+ case US_BULK_STAT_PHASE:
+ /* phase error -- note that a transport reset will be
+ * invoked by the invoke_transport() function
+ */
+ return USB_STOR_TRANSPORT_ERROR;
+ }
+
+ /* we should never get here, but if we do, we're in trouble */
+ return USB_STOR_TRANSPORT_ERROR;
+}
+
+/* Determine what the maximum LUN supported is */
+static int rts51x_get_max_lun(struct us_data *us)
+{
+ int result;
+
+ /* issue the command */
+ us->iobuf[0] = 0;
+ result = usb_stor_control_msg(us, us->recv_ctrl_pipe,
+ US_BULK_GET_MAX_LUN,
+ USB_DIR_IN | USB_TYPE_CLASS |
+ USB_RECIP_INTERFACE,
+ 0, us->ifnum, us->iobuf, 1, 10*HZ);
+
+ US_DEBUGP("GetMaxLUN command result is %d, data is %d\n",
+ result, us->iobuf[0]);
+
+ /* if we have a successful request, return the result */
+ if (result > 0)
+ return us->iobuf[0];
+
+ return 0;
+}
+
+static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
+{
+ int retval;
+ u8 cmnd[12] = {0};
+
+ US_DEBUGP("%s, addr = 0x%x, len = %d\n", __func__, addr, len);
+
+ cmnd[0] = 0xF0;
+ cmnd[1] = 0x0D;
+ cmnd[2] = (u8)(addr >> 8);
+ cmnd[3] = (u8)addr;
+ cmnd[4] = (u8)(len >> 8);
+ cmnd[5] = (u8)len;
+
+ retval = rts51x_bulk_transport(us, 0, cmnd, 12,
+ data, len, DMA_FROM_DEVICE, NULL);
+ if (retval != USB_STOR_TRANSPORT_GOOD)
+ return -EIO;
+
+ return 0;
+}
+
+static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
+{
+ int retval;
+ u8 cmnd[12] = {0};
+
+ US_DEBUGP("%s, addr = 0x%x, len = %d\n", __func__, addr, len);
+
+ cmnd[0] = 0xF0;
+ cmnd[1] = 0x0E;
+ cmnd[2] = (u8)(addr >> 8);
+ cmnd[3] = (u8)addr;
+ cmnd[4] = (u8)(len >> 8);
+ cmnd[5] = (u8)len;
+
+ retval = rts51x_bulk_transport(us, 0, cmnd, 12,
+ data, len, DMA_TO_DEVICE, NULL);
+ if (retval != USB_STOR_TRANSPORT_GOOD)
+ return -EIO;
+
+ return 0;
+}
+
+static int rts51x_read_status(struct us_data *us,
+ u8 lun, u8 *status, int len, int *actlen)
+{
+ int retval;
+ u8 cmnd[12] = {0};
+
+ US_DEBUGP("%s, lun = %d\n", __func__, lun);
+
+ cmnd[0] = 0xF0;
+ cmnd[1] = 0x09;
+
+ retval = rts51x_bulk_transport(us, lun, cmnd, 12,
+ status, len, DMA_FROM_DEVICE, actlen);
+ if (retval != USB_STOR_TRANSPORT_GOOD)
+ return -EIO;
+
+ return 0;
+}
+
+static int rts51x_check_status(struct us_data *us, u8 lun)
+{
+ struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
+ int retval;
+ u8 buf[16];
+
+ retval = rts51x_read_status(us, lun, buf, 16, &(chip->status_len));
+ if (retval < 0)
+ return -EIO;
+
+ US_DEBUGP("chip->status_len = %d\n", chip->status_len);
+
+ chip->status[lun].vid = ((u16)buf[0] << 8) | buf[1];
+ chip->status[lun].pid = ((u16)buf[2] << 8) | buf[3];
+ chip->status[lun].cur_lun = buf[4];
+ chip->status[lun].card_type = buf[5];
+ chip->status[lun].total_lun = buf[6];
+ chip->status[lun].fw_ver = ((u16)buf[7] << 8) | buf[8];
+ chip->status[lun].phy_exist = buf[9];
+ chip->status[lun].multi_flag = buf[10];
+ chip->status[lun].multi_card = buf[11];
+ chip->status[lun].log_exist = buf[12];
+ if (chip->status_len == 16) {
+ chip->status[lun].detailed_type.detailed_type1 = buf[13];
+ chip->status[lun].function[0] = buf[14];
+ chip->status[lun].function[1] = buf[15];
+ }
+
+ return 0;
+}
+
+static int enable_oscillator(struct us_data *us)
+{
+ int retval;
+ u8 value;
+
+ retval = rts51x_read_mem(us, 0xFE77, &value, 1);
+ if (retval < 0)
+ return -EIO;
+
+ value |= 0x04;
+ retval = rts51x_write_mem(us, 0xFE77, &value, 1);
+ if (retval < 0)
+ return -EIO;
+
+ retval = rts51x_read_mem(us, 0xFE77, &value, 1);
+ if (retval < 0)
+ return -EIO;
+
+ if (!(value & 0x04))
+ return -EIO;
+
+ return 0;
+}
+
+static int do_config_autodelink(struct us_data *us, int enable, int force)
+{
+ int retval;
+ u8 value;
+
+ retval = rts51x_read_mem(us, 0xFE47, &value, 1);
+ if (retval < 0)
+ return -EIO;
+
+ if (enable) {
+ if (force)
+ value |= 0x03;
+ else
+ value |= 0x01;
+ } else {
+ value &= ~0x03;
+ }
+
+ US_DEBUGP("In %s,set 0xfe47 to 0x%x\n", __func__, value);
+
+ retval = rts51x_write_mem(us, 0xFE47, &value, 1);
+ if (retval < 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int config_autodelink_after_power_on(struct us_data *us)
+{
+ struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
+ int retval;
+ u8 value;
+
+ if (!CHK_AUTO_DELINK(chip))
+ return 0;
+
+ retval = rts51x_read_mem(us, 0xFE47, &value, 1);
+ if (retval < 0)
+ return -EIO;
+
+ if (auto_delink_en) {
+ CLR_BIT(value, 0);
+ CLR_BIT(value, 1);
+ SET_BIT(value, 2);
+
+ if (CHECK_ID(chip, 0x0138, 0x3882))
+ CLR_BIT(value, 2);
+
+ SET_BIT(value, 7);
+
+ retval = rts51x_write_mem(us, 0xFE47, &value, 1);
+ if (retval < 0)
+ return -EIO;
+
+ retval = enable_oscillator(us);
+ if (retval == 0)
+ (void)do_config_autodelink(us, 1, 0);
+ } else {
+ /* Autodelink controlled by firmware */
+
+ SET_BIT(value, 2);
+
+ if (CHECK_ID(chip, 0x0138, 0x3882))
+ CLR_BIT(value, 2);
+
+ if (CHECK_ID(chip, 0x0159, 0x5889) ||
+ CHECK_ID(chip, 0x0138, 0x3880)) {
+ CLR_BIT(value, 0);
+ CLR_BIT(value, 7);
+ }
+
+ retval = rts51x_write_mem(us, 0xFE47, &value, 1);
+ if (retval < 0)
+ return -EIO;
+
+ if (CHECK_ID(chip, 0x0159, 0x5888)) {
+ value = 0xFF;
+ retval = rts51x_write_mem(us, 0xFE79, &value, 1);
+ if (retval < 0)
+ return -EIO;
+
+ value = 0x01;
+ retval = rts51x_write_mem(us, 0x48, &value, 1);
+ if (retval < 0)
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int config_autodelink_before_power_down(struct us_data *us)
+{
+ struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
+ int retval;
+ u8 value;
+
+ if (!CHK_AUTO_DELINK(chip))
+ return 0;
+
+ if (auto_delink_en) {
+ retval = rts51x_read_mem(us, 0xFE77, &value, 1);
+ if (retval < 0)
+ return -EIO;
+
+ SET_BIT(value, 2);
+ retval = rts51x_write_mem(us, 0xFE77, &value, 1);
+ if (retval < 0)
+ return -EIO;
+
+ if (CHECK_ID(chip, 0x0159, 0x5888)) {
+ value = 0x01;
+ retval = rts51x_write_mem(us, 0x48, &value, 1);
+ if (retval < 0)
+ return -EIO;
+ }
+
+ retval = rts51x_read_mem(us, 0xFE47, &value, 1);
+ if (retval < 0)
+ return -EIO;
+
+ SET_BIT(value, 0);
+ if (CHECK_ID(chip, 0x0138, 0x3882))
+ SET_BIT(value, 2);
+ retval = rts51x_write_mem(us, 0xFE77, &value, 1);
+ if (retval < 0)
+ return -EIO;
+ } else {
+ if (CHECK_ID(chip, 0x0159, 0x5889) ||
+ CHECK_ID(chip, 0x0138, 0x3880) ||
+ CHECK_ID(chip, 0x0138, 0x3882)) {
+ retval = rts51x_read_mem(us, 0xFE47, &value, 1);
+ if (retval < 0)
+ return -EIO;
+
+ if (CHECK_ID(chip, 0x0159, 0x5889) ||
+ CHECK_ID(chip, 0x0138, 0x3880)) {
+ SET_BIT(value, 0);
+ SET_BIT(value, 7);
+ }
+
+ if (CHECK_ID(chip, 0x0138, 0x3882))
+ SET_BIT(value, 2);
+
+ retval = rts51x_write_mem(us, 0xFE47, &value, 1);
+ if (retval < 0)
+ return -EIO;
+ }
+
+ if (CHECK_ID(chip, 0x0159, 0x5888)) {
+ value = 0x01;
+ retval = rts51x_write_mem(us, 0x48, &value, 1);
+ if (retval < 0)
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static void realtek_cr_destructor(void *extra)
+{
+ struct rts51x_chip *chip = (struct rts51x_chip *)extra;
+
+ if (!chip)
+ return;
+
+ kfree(chip->status);
+}
+
+#ifdef CONFIG_PM
+static void realtek_pm_hook(struct us_data *us, int pm_state)
+{
+ if (pm_state == US_SUSPEND)
+ (void)config_autodelink_before_power_down(us);
+}
+#endif
+
+static int init_realtek_cr(struct us_data *us)
+{
+ struct rts51x_chip *chip;
+ int size, i, retval;
+
+ chip = kzalloc(sizeof(struct rts51x_chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ us->extra = chip;
+ us->extra_destructor = realtek_cr_destructor;
+#ifdef CONFIG_PM
+ us->suspend_resume_hook = realtek_pm_hook;
+#endif
+
+ us->max_lun = chip->max_lun = rts51x_get_max_lun(us);
+
+ US_DEBUGP("chip->max_lun = %d\n", chip->max_lun);
+
+ size = (chip->max_lun + 1) * sizeof(struct rts51x_status);
+ chip->status = kzalloc(size, GFP_KERNEL);
+ if (!chip->status)
+ goto INIT_FAIL;
+
+ for (i = 0; i <= (int)(chip->max_lun); i++) {
+ retval = rts51x_check_status(us, (u8)i);
+ if (retval < 0)
+ goto INIT_FAIL;
+ }
+
+ if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
+ CHECK_FW_VER(chip, 0x5901))
+ SET_AUTO_DELINK(chip);
+ if (STATUS_LEN(chip) == 16) {
+ if (SUPPORT_AUTO_DELINK(chip))
+ SET_AUTO_DELINK(chip);
+ }
+
+ US_DEBUGP("chip->flag = 0x%x\n", chip->flag);
+
+ (void)config_autodelink_after_power_on(us);
+
+ return 0;
+
+INIT_FAIL:
+ if (us->extra) {
+ kfree(chip->status);
+ kfree(us->extra);
+ us->extra = NULL;
+ }
+
+ return -EIO;
+}
+
+static int realtek_cr_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct us_data *us;
+ int result;
+
+ US_DEBUGP("Probe Realtek Card Reader!\n");
+
+ result = usb_stor_probe1(&us, intf, id,
+ (id - realtek_cr_ids) + realtek_cr_unusual_dev_list);
+ if (result)
+ return result;
+
+ result = usb_stor_probe2(us);
+ return result;
+}
+
+static struct usb_driver realtek_cr_driver = {
+ .name = "ums-realtek",
+ .probe = realtek_cr_probe,
+ .disconnect = usb_stor_disconnect,
+ .suspend = usb_stor_suspend,
+ .resume = usb_stor_resume,
+ .reset_resume = usb_stor_reset_resume,
+ .pre_reset = usb_stor_pre_reset,
+ .post_reset = usb_stor_post_reset,
+ .id_table = realtek_cr_ids,
+ .soft_unbind = 1,
+};
+
+static int __init realtek_cr_init(void)
+{
+ return usb_register(&realtek_cr_driver);
+}
+
+static void __exit realtek_cr_exit(void)
+{
+ usb_deregister(&realtek_cr_driver);
+}
+
+module_init(realtek_cr_init);
+module_exit(realtek_cr_exit);
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index ceba512f84d..1deca07c826 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -126,13 +126,11 @@ static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL);
int sierra_ms_init(struct us_data *us)
{
int result, retries;
- signed long delay_t;
struct swoc_info *swocInfo;
struct usb_device *udev;
struct Scsi_Host *sh;
struct scsi_device *sd;
- delay_t = 2;
retries = 3;
result = 0;
udev = us->pusb_dev;
diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
new file mode 100644
index 00000000000..3236e032851
--- /dev/null
+++ b/drivers/usb/storage/unusual_realtek.h
@@ -0,0 +1,41 @@
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#if defined(CONFIG_USB_STORAGE_REALTEK) || \
+ defined(CONFIG_USB_STORAGE_REALTEK_MODULE)
+
+UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999,
+ "Realtek",
+ "USB Card Reader",
+ USB_SC_SCSI, USB_PR_BULK, init_realtek_cr, 0),
+
+UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999,
+ "Realtek",
+ "USB Card Reader",
+ USB_SC_SCSI, USB_PR_BULK, init_realtek_cr, 0),
+
+UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999,
+ "Realtek",
+ "USB Card Reader",
+ USB_SC_SCSI, USB_PR_BULK, init_realtek_cr, 0),
+
+#endif /* defined(CONFIG_USB_STORAGE_REALTEK) || ... */
diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
index 468bde7d197..4293077c01a 100644
--- a/drivers/usb/storage/usual-tables.c
+++ b/drivers/usb/storage/usual-tables.c
@@ -85,6 +85,7 @@ static struct ignore_entry ignore_ids[] = {
# include "unusual_jumpshot.h"
# include "unusual_karma.h"
# include "unusual_onetouch.h"
+# include "unusual_realtek.h"
# include "unusual_sddr09.h"
# include "unusual_sddr55.h"
# include "unusual_usbat.h"
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c
index a68ad7aa0b5..c175b7300c7 100644
--- a/drivers/usb/wusbcore/rh.c
+++ b/drivers/usb/wusbcore/rh.c
@@ -156,7 +156,7 @@ int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf)
EXPORT_SYMBOL_GPL(wusbhc_rh_status_data);
/*
- * Return the hub's desciptor
+ * Return the hub's descriptor
*
* NOTE: almost cut and paste from ehci-hub.c
*
@@ -184,8 +184,8 @@ static int wusbhc_rh_get_hub_descr(struct wusbhc *wusbhc, u16 wValue,
descr->bPwrOn2PwrGood = 0;
descr->bHubContrCurrent = 0;
/* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
- memset(&descr->bitmap[0], 0, temp);
- memset(&descr->bitmap[temp], 0xff, temp);
+ memset(&descr->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&descr->u.hs.DeviceRemovable[temp], 0xff, temp);
return 0;
}
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
index 2054d4ee977..0faca16df76 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -320,7 +320,7 @@ u8 wusb_cluster_id_get(void)
u8 id;
spin_lock(&wusb_cluster_ids_lock);
id = find_first_zero_bit(wusb_cluster_id_table, CLUSTER_IDS);
- if (id > CLUSTER_IDS) {
+ if (id >= CLUSTER_IDS) {
id = 0;
goto out;
}
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 3d94c4247f4..6bd426b7ec0 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -132,7 +132,7 @@ static inline void wusb_dev_put(struct wusb_dev *wusb_dev)
}
/**
- * Wireless USB Host Controlller root hub "fake" ports
+ * Wireless USB Host Controller root hub "fake" ports
* (state and device information)
*
* Wireless USB is wireless, so there are no ports; but we
diff --git a/drivers/uwb/scan.c b/drivers/uwb/scan.c
index 76a1a1ed7d3..367aa12786b 100644
--- a/drivers/uwb/scan.c
+++ b/drivers/uwb/scan.c
@@ -42,7 +42,7 @@
/**
* Start/stop scanning in a radio controller
*
- * @rc: UWB Radio Controlller
+ * @rc: UWB Radio Controller
* @channel: Channel to scan; encodings in WUSB1.0[Table 5.12]
* @type: Type of scanning to do.
* @bpst_offset: value at which to start scanning (if type ==
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 6bafb51bb43..e0ea23f07dd 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2365,6 +2365,15 @@ config FB_JZ4740
help
Framebuffer support for the JZ4740 SoC.
+config FB_MXS
+ tristate "MXS LCD framebuffer support"
+ depends on FB && ARCH_MXS
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Framebuffer support for the MXS SoC.
+
source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 8c8fabdff9d..9a096aebb85 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -153,6 +153,7 @@ obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o
obj-$(CONFIG_FB_BFIN_7393) += bfin_adv7393fb.o
obj-$(CONFIG_FB_MX3) += mx3fb.o
obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
+obj-$(CONFIG_FB_MXS) += mxsfb.o
# the test framebuffer is last
obj-$(CONFIG_FB_VIRTUAL) += vfb.o
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 1c2c68356ea..5fc983c5b92 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -120,8 +120,23 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
static int
clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
{
+ u32 caps;
int ret = 0;
+ if (fb->panel->caps && fb->board->caps)
+ caps = fb->panel->caps & fb->board->caps;
+ else {
+ /* Old way of specifying what can be used */
+ caps = fb->panel->cntl & CNTL_BGR ?
+ CLCD_CAP_BGR : CLCD_CAP_RGB;
+ /* But mask out 444 modes as they weren't supported */
+ caps &= ~CLCD_CAP_444;
+ }
+
+ /* Only TFT panels can do RGB888/BGR888 */
+ if (!(fb->panel->cntl & CNTL_LCDTFT))
+ caps &= ~CLCD_CAP_888;
+
memset(&var->transp, 0, sizeof(var->transp));
var->red.msb_right = 0;
@@ -133,6 +148,13 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
case 2:
case 4:
case 8:
+ /* If we can't do 5551, reject */
+ caps &= CLCD_CAP_5551;
+ if (!caps) {
+ ret = -EINVAL;
+ break;
+ }
+
var->red.length = var->bits_per_pixel;
var->red.offset = 0;
var->green.length = var->bits_per_pixel;
@@ -140,23 +162,61 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
var->blue.length = var->bits_per_pixel;
var->blue.offset = 0;
break;
+
case 16:
- var->red.length = 5;
- var->blue.length = 5;
+ /* If we can't do 444, 5551 or 565, reject */
+ if (!(caps & (CLCD_CAP_444 | CLCD_CAP_5551 | CLCD_CAP_565))) {
+ ret = -EINVAL;
+ break;
+ }
+
/*
- * Green length can be 5 or 6 depending whether
- * we're operating in RGB555 or RGB565 mode.
+ * Green length can be 4, 5 or 6 depending whether
+ * we're operating in 444, 5551 or 565 mode.
*/
- if (var->green.length != 5 && var->green.length != 6)
- var->green.length = 6;
+ if (var->green.length == 4 && caps & CLCD_CAP_444)
+ caps &= CLCD_CAP_444;
+ if (var->green.length == 5 && caps & CLCD_CAP_5551)
+ caps &= CLCD_CAP_5551;
+ else if (var->green.length == 6 && caps & CLCD_CAP_565)
+ caps &= CLCD_CAP_565;
+ else {
+ /*
+ * PL110 officially only supports RGB555,
+ * but may be wired up to allow RGB565.
+ */
+ if (caps & CLCD_CAP_565) {
+ var->green.length = 6;
+ caps &= CLCD_CAP_565;
+ } else if (caps & CLCD_CAP_5551) {
+ var->green.length = 5;
+ caps &= CLCD_CAP_5551;
+ } else {
+ var->green.length = 4;
+ caps &= CLCD_CAP_444;
+ }
+ }
+
+ if (var->green.length >= 5) {
+ var->red.length = 5;
+ var->blue.length = 5;
+ } else {
+ var->red.length = 4;
+ var->blue.length = 4;
+ }
break;
case 32:
- if (fb->panel->cntl & CNTL_LCDTFT) {
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
+ /* If we can't do 888, reject */
+ caps &= CLCD_CAP_888;
+ if (!caps) {
+ ret = -EINVAL;
break;
}
+
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ break;
default:
ret = -EINVAL;
break;
@@ -168,7 +228,20 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
* the bitfield length defined above.
*/
if (ret == 0 && var->bits_per_pixel >= 16) {
- if (fb->panel->cntl & CNTL_BGR) {
+ bool bgr, rgb;
+
+ bgr = caps & CLCD_CAP_BGR && var->blue.offset == 0;
+ rgb = caps & CLCD_CAP_RGB && var->red.offset == 0;
+
+ if (!bgr && !rgb)
+ /*
+ * The requested format was not possible, try just
+ * our capabilities. One of BGR or RGB must be
+ * supported.
+ */
+ bgr = caps & CLCD_CAP_BGR;
+
+ if (bgr) {
var->blue.offset = 0;
var->green.offset = var->blue.offset + var->blue.length;
var->red.offset = var->green.offset + var->green.length;
@@ -443,8 +516,8 @@ static int clcdfb_register(struct clcd_fb *fb)
fb_set_var(&fb->fb, &fb->fb.var);
- printk(KERN_INFO "CLCD: %s hardware, %s display\n",
- fb->board->name, fb->panel->mode.name);
+ dev_info(&fb->dev->dev, "%s hardware, %s display\n",
+ fb->board->name, fb->panel->mode.name);
ret = register_framebuffer(&fb->fb);
if (ret == 0)
@@ -461,7 +534,7 @@ static int clcdfb_register(struct clcd_fb *fb)
return ret;
}
-static int clcdfb_probe(struct amba_device *dev, struct amba_id *id)
+static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
{
struct clcd_board *board = dev->dev.platform_data;
struct clcd_fb *fb;
@@ -486,6 +559,10 @@ static int clcdfb_probe(struct amba_device *dev, struct amba_id *id)
fb->dev = dev;
fb->board = board;
+ dev_info(&fb->dev->dev, "PL%03x rev%u at 0x%08llx\n",
+ amba_part(dev), amba_rev(dev),
+ (unsigned long long)dev->res.start);
+
ret = fb->board->setup(fb);
if (ret)
goto free_fb;
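
The new clcdfb_set_bitfields() logic works on the intersection of panel and board capabilities. A hedged illustration of that intersection, assuming the CLCD_CAP_* flags introduced alongside this change in <linux/amba/clcd.h>; the particular flag combination is an example, not taken from a real board file:

#include <linux/types.h>
#include <linux/amba/clcd.h>

static u32 example_caps(void)
{
	/* Panel can do RGB/BGR 565 and 888; board only wires up RGB565. */
	u32 panel_caps = CLCD_CAP_565 | CLCD_CAP_888 | CLCD_CAP_RGB | CLCD_CAP_BGR;
	u32 board_caps = CLCD_CAP_565 | CLCD_CAP_RGB;

	return panel_caps & board_caps;	/* -> CLCD_CAP_565 | CLCD_CAP_RGB */
}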
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index b224396b86d..552825c469b 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -12,11 +12,12 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/i2c.h>
#include <linux/backlight.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/88pm860x.h>
-#include <linux/slab.h>
#define MAX_BRIGHTNESS (0xFF)
#define MIN_BRIGHTNESS (0)
@@ -161,32 +162,13 @@ static const struct backlight_ops pm860x_backlight_ops = {
.get_brightness = pm860x_backlight_get_brightness,
};
-static int __check_device(struct pm860x_backlight_pdata *pdata, char *name)
-{
- struct pm860x_backlight_pdata *p = pdata;
- int ret = -EINVAL;
-
- while (p && p->id) {
- if ((p->id != PM8606_ID_BACKLIGHT) || (p->flags < 0))
- break;
-
- if (!strncmp(name, pm860x_backlight_name[p->flags],
- MFD_NAME_SIZE)) {
- ret = (int)p->flags;
- break;
- }
- p++;
- }
- return ret;
-}
-
static int pm860x_backlight_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm860x_platform_data *pm860x_pdata;
struct pm860x_backlight_pdata *pdata = NULL;
struct pm860x_backlight_data *data;
struct backlight_device *bl;
+ struct mfd_cell *cell;
struct resource *res;
struct backlight_properties props;
unsigned char value;
@@ -199,10 +181,10 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (pdev->dev.parent->platform_data) {
- pm860x_pdata = pdev->dev.parent->platform_data;
- pdata = pm860x_pdata->backlight;
- }
+ cell = pdev->dev.platform_data;
+ if (cell == NULL)
+ return -ENODEV;
+ pdata = cell->mfd_data;
if (pdata == NULL) {
dev_err(&pdev->dev, "platform data isn't assigned to "
"backlight\n");
@@ -219,7 +201,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
data->current_brightness = MAX_BRIGHTNESS;
data->pwm = pdata->pwm;
data->iset = pdata->iset;
- data->port = __check_device(pdata, name);
+ data->port = pdata->flags;
if (data->port < 0) {
dev_err(&pdev->dev, "wrong platform data is assigned");
kfree(data);
diff --git a/drivers/video/cfbfillrect.c b/drivers/video/cfbfillrect.c
index ba9f58b2a5e..eb7a164f318 100644
--- a/drivers/video/cfbfillrect.c
+++ b/drivers/video/cfbfillrect.c
@@ -291,7 +291,7 @@ void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
p->fix.visual == FB_VISUAL_DIRECTCOLOR )
fg = ((u32 *) (p->pseudo_palette))[rect->color];
else
- fg = rect->color;
+ fg = solid_color(p, rect->color);
pat = pixel_to_pat(bpp, fg);
diff --git a/drivers/video/cfbimgblt.c b/drivers/video/cfbimgblt.c
index baed57d3cff..a12e545b92c 100644
--- a/drivers/video/cfbimgblt.c
+++ b/drivers/video/cfbimgblt.c
@@ -106,6 +106,7 @@ static inline void color_imageblit(const struct fb_image *image,
else
color = *src;
color <<= FB_LEFT_POS(p, bpp);
+ color = solid_color(p, color);
val |= FB_SHIFT_HIGH(p, color, shift ^ bswapmask);
if (shift >= null_bits) {
FB_WRITEL(val, dst++);
@@ -290,8 +291,8 @@ void cfb_imageblit(struct fb_info *p, const struct fb_image *image)
fgcolor = ((u32*)(p->pseudo_palette))[image->fg_color];
bgcolor = ((u32*)(p->pseudo_palette))[image->bg_color];
} else {
- fgcolor = image->fg_color;
- bgcolor = image->bg_color;
+ fgcolor = solid_color(p, image->fg_color);
+ bgcolor = solid_color(p, image->bg_color);
}
if (32 % bpp == 0 && !start_index && !pitch_index &&
diff --git a/drivers/video/edid.h b/drivers/video/edid.h
index bd89fb3be8c..d03a232d90b 100644
--- a/drivers/video/edid.h
+++ b/drivers/video/edid.h
@@ -101,8 +101,8 @@
#define V_SYNC_WIDTH COMBINE_HI_4LO( V_SYNC_WIDTH_HI, V_SYNC_WIDTH_LO )
#define V_SYNC_OFFSET COMBINE_HI_4LO( V_SYNC_OFFSET_HI, V_SYNC_OFFSET_LO )
-#define H_SYNC_WIDTH COMBINE_HI_4LO( H_SYNC_WIDTH_HI, H_SYNC_WIDTH_LO )
-#define H_SYNC_OFFSET COMBINE_HI_4LO( H_SYNC_OFFSET_HI, H_SYNC_OFFSET_LO )
+#define H_SYNC_WIDTH COMBINE_HI_8LO( H_SYNC_WIDTH_HI, H_SYNC_WIDTH_LO )
+#define H_SYNC_OFFSET COMBINE_HI_8LO( H_SYNC_OFFSET_HI, H_SYNC_OFFSET_LO )
#define H_SIZE_LO (unsigned)block[ 12 ]
#define V_SIZE_LO (unsigned)block[ 13 ]
diff --git a/drivers/video/fb_draw.h b/drivers/video/fb_draw.h
index 04c01faaf77..4ba0e0b1f16 100644
--- a/drivers/video/fb_draw.h
+++ b/drivers/video/fb_draw.h
@@ -15,6 +15,20 @@ comp(unsigned long a, unsigned long b, unsigned long mask)
return ((a ^ b) & mask) ^ b;
}
+/* if framebuffer has alpha channel, mask in the appropriate
+ * bit(s) to make the specified color opaque
+ */
+static inline unsigned long
+solid_color(struct fb_info *p, unsigned long color)
+{
+ if (p->var.transp.length > 0) {
+ u32 mask = (1 << p->var.transp.length) - 1;
+ mask <<= p->var.transp.offset;
+ color |= mask;
+ }
+ return color;
+}
+
/*
* Create a pattern with the given pixel's color
*/
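
A quick worked example of the new solid_color() helper, assuming a 32bpp ARGB format with an 8-bit alpha channel at bit offset 24 (the values are assumptions for illustration):

#include <linux/types.h>

/* transp.length = 8, transp.offset = 24, as in ARGB8888 */
static u32 solid_color_example(void)
{
	u32 mask = ((1u << 8) - 1) << 24;	/* 0xFF000000 */

	return 0x00345678 | mask;		/* 0xFF345678: same RGB, fully opaque */
}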
diff --git a/drivers/video/msm/mdp_hw.h b/drivers/video/msm/mdp_hw.h
index 4e3deb4e592..d80477415ca 100644
--- a/drivers/video/msm/mdp_hw.h
+++ b/drivers/video/msm/mdp_hw.h
@@ -449,6 +449,7 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
#define PPP_CFG_MDP_XRGB_8888(dir) PPP_CFG_MDP_ARGB_8888(dir)
#define PPP_CFG_MDP_RGBA_8888(dir) PPP_CFG_MDP_ARGB_8888(dir)
#define PPP_CFG_MDP_BGRA_8888(dir) PPP_CFG_MDP_ARGB_8888(dir)
+#define PPP_CFG_MDP_RGBX_8888(dir) PPP_CFG_MDP_ARGB_8888(dir)
#define PPP_CFG_MDP_Y_CBCR_H2V2(dir) (PPP_##dir##_C2R_8BIT | \
PPP_##dir##_C0G_8BIT | \
@@ -488,12 +489,14 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8)
#define PPP_PACK_PATTERN_MDP_RGB_888 PPP_PACK_PATTERN_MDP_RGB_565
#define PPP_PACK_PATTERN_MDP_XRGB_8888 \
- MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B, 8)
+ MDP_GET_PACK_PATTERN(CLR_B, CLR_G, CLR_R, CLR_ALPHA, 8)
#define PPP_PACK_PATTERN_MDP_ARGB_8888 PPP_PACK_PATTERN_MDP_XRGB_8888
#define PPP_PACK_PATTERN_MDP_RGBA_8888 \
MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G, CLR_R, 8)
#define PPP_PACK_PATTERN_MDP_BGRA_8888 \
MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B, 8)
+#define PPP_PACK_PATTERN_MDP_RGBX_8888 \
+ MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G, CLR_R, 8)
#define PPP_PACK_PATTERN_MDP_Y_CBCR_H2V1 \
MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8)
#define PPP_PACK_PATTERN_MDP_Y_CBCR_H2V2 PPP_PACK_PATTERN_MDP_Y_CBCR_H2V1
@@ -509,6 +512,7 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
#define PPP_CHROMA_SAMP_MDP_ARGB_8888(dir) PPP_OP_##dir##_CHROMA_RGB
#define PPP_CHROMA_SAMP_MDP_RGBA_8888(dir) PPP_OP_##dir##_CHROMA_RGB
#define PPP_CHROMA_SAMP_MDP_BGRA_8888(dir) PPP_OP_##dir##_CHROMA_RGB
+#define PPP_CHROMA_SAMP_MDP_RGBX_8888(dir) PPP_OP_##dir##_CHROMA_RGB
#define PPP_CHROMA_SAMP_MDP_Y_CBCR_H2V1(dir) PPP_OP_##dir##_CHROMA_H2V1
#define PPP_CHROMA_SAMP_MDP_Y_CBCR_H2V2(dir) PPP_OP_##dir##_CHROMA_420
#define PPP_CHROMA_SAMP_MDP_Y_CRCB_H2V1(dir) PPP_OP_##dir##_CHROMA_H2V1
@@ -523,6 +527,7 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
[MDP_ARGB_8888] = PPP_##name##_MDP_ARGB_8888,\
[MDP_RGBA_8888] = PPP_##name##_MDP_RGBA_8888,\
[MDP_BGRA_8888] = PPP_##name##_MDP_BGRA_8888,\
+ [MDP_RGBX_8888] = PPP_##name##_MDP_RGBX_8888,\
[MDP_Y_CBCR_H2V1] = PPP_##name##_MDP_Y_CBCR_H2V1,\
[MDP_Y_CBCR_H2V2] = PPP_##name##_MDP_Y_CBCR_H2V2,\
[MDP_Y_CRCB_H2V1] = PPP_##name##_MDP_Y_CRCB_H2V1,\
@@ -536,6 +541,7 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
[MDP_ARGB_8888] = PPP_##name##_MDP_ARGB_8888(dir),\
[MDP_RGBA_8888] = PPP_##name##_MDP_RGBA_8888(dir),\
[MDP_BGRA_8888] = PPP_##name##_MDP_BGRA_8888(dir),\
+ [MDP_RGBX_8888] = PPP_##name##_MDP_RGBX_8888(dir),\
[MDP_Y_CBCR_H2V1] = PPP_##name##_MDP_Y_CBCR_H2V1(dir),\
[MDP_Y_CBCR_H2V2] = PPP_##name##_MDP_Y_CBCR_H2V2(dir),\
[MDP_Y_CRCB_H2V1] = PPP_##name##_MDP_Y_CRCB_H2V1(dir),\
@@ -547,7 +553,8 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
(img == MDP_YCRYCB_H2V1))
#define IS_RGB(img) ((img == MDP_RGB_565) | (img == MDP_RGB_888) | \
(img == MDP_ARGB_8888) | (img == MDP_RGBA_8888) | \
- (img == MDP_XRGB_8888) | (img == MDP_BGRA_8888))
+ (img == MDP_XRGB_8888) | (img == MDP_BGRA_8888) | \
+ (img == MDP_RGBX_8888))
#define HAS_ALPHA(img) ((img == MDP_ARGB_8888) | (img == MDP_RGBA_8888) | \
(img == MDP_BGRA_8888))
diff --git a/drivers/video/msm/mdp_ppp.c b/drivers/video/msm/mdp_ppp.c
index 4ff001f4cbb..2b6564e8bfe 100644
--- a/drivers/video/msm/mdp_ppp.c
+++ b/drivers/video/msm/mdp_ppp.c
@@ -69,6 +69,7 @@ static uint32_t bytes_per_pixel[] = {
[MDP_ARGB_8888] = 4,
[MDP_RGBA_8888] = 4,
[MDP_BGRA_8888] = 4,
+ [MDP_RGBX_8888] = 4,
[MDP_Y_CBCR_H2V1] = 1,
[MDP_Y_CBCR_H2V2] = 1,
[MDP_Y_CRCB_H2V1] = 1,
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index debe5933fd2..ec351309e60 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -81,7 +81,6 @@ struct msmfb_info {
spinlock_t update_lock;
struct mutex panel_init_lock;
wait_queue_head_t frame_wq;
- struct workqueue_struct *resume_workqueue;
struct work_struct resume_work;
struct msmfb_callback dma_callback;
struct msmfb_callback vsync_callback;
@@ -111,7 +110,7 @@ static void msmfb_handle_dma_interrupt(struct msmfb_callback *callback)
if (msmfb->sleeping == UPDATING &&
msmfb->frame_done == msmfb->update_frame) {
DLOG(SUSPEND_RESUME, "full update completed\n");
- queue_work(msmfb->resume_workqueue, &msmfb->resume_work);
+ schedule_work(&msmfb->resume_work);
}
spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
wake_up(&msmfb->frame_wq);
@@ -220,8 +219,8 @@ restart:
sleeping = msmfb->sleeping;
/* on a full update, if the last frame has not completed, wait for it */
- if (pan_display && (msmfb->frame_requested != msmfb->frame_done ||
- sleeping == UPDATING)) {
+ if ((pan_display && msmfb->frame_requested != msmfb->frame_done) ||
+ sleeping == UPDATING) {
int ret;
spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
ret = wait_event_interruptible_timeout(msmfb->frame_wq,
@@ -470,6 +469,18 @@ static void setup_fb_info(struct msmfb_info *msmfb)
fb_info->var.yoffset = 0;
if (msmfb->panel->caps & MSMFB_CAP_PARTIAL_UPDATES) {
+ /*
+ * Set the marker in the fixed screen info, so userspace can't
+ * change it. It will be used to detect the partial-update
+ * capability.
+ */
+ fb_info->fix.reserved[0] = 0x5444;
+ fb_info->fix.reserved[1] = 0x5055;
+
+ /*
+ * This preloads the value so that if userspace doesn't
+ * change it, it will be a full update
+ */
fb_info->var.reserved[0] = 0x54445055;
fb_info->var.reserved[1] = 0;
fb_info->var.reserved[2] = (uint16_t)msmfb->xres |
@@ -559,12 +570,6 @@ static int msmfb_probe(struct platform_device *pdev)
spin_lock_init(&msmfb->update_lock);
mutex_init(&msmfb->panel_init_lock);
init_waitqueue_head(&msmfb->frame_wq);
- msmfb->resume_workqueue = create_workqueue("panel_on");
- if (msmfb->resume_workqueue == NULL) {
- printk(KERN_ERR "failed to create panel_on workqueue\n");
- ret = -ENOMEM;
- goto error_create_workqueue;
- }
INIT_WORK(&msmfb->resume_work, power_on_panel);
msmfb->black = kzalloc(msmfb->fb->var.bits_per_pixel*msmfb->xres,
GFP_KERNEL);
@@ -589,8 +594,6 @@ static int msmfb_probe(struct platform_device *pdev)
return 0;
error_register_framebuffer:
- destroy_workqueue(msmfb->resume_workqueue);
-error_create_workqueue:
iounmap(fb->screen_base);
error_setup_fbmem:
framebuffer_release(msmfb->fb);
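
The marker words written into fix.reserved[] above give userspace a way to probe for partial-update support. A minimal detection sketch follows; the magic values come from the hunk, while the helper name and device path are assumptions.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fb.h>

static int msmfb_has_partial_updates(const char *path)
{
	struct fb_fix_screeninfo fix;
	int fd = open(path, O_RDWR);
	int ret = 0;

	if (fd < 0)
		return 0;
	if (ioctl(fd, FBIOGET_FSCREENINFO, &fix) == 0 &&
	    fix.reserved[0] == 0x5444 && fix.reserved[1] == 0x5055)
		ret = 1;	/* driver advertises partial updates */
	close(fd);
	return ret;
}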
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
new file mode 100644
index 00000000000..7d028488298
--- /dev/null
+++ b/drivers/video/mxsfb.c
@@ -0,0 +1,919 @@
+/*
+ * Copyright (C) 2010 Juergen Beisert, Pengutronix
+ *
+ * This code is based on:
+ * Author: Vitaly Wool <vital@embeddedalley.com>
+ *
+ * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define DRIVER_NAME "mxsfb"
+
+/**
+ * @file
+ * @brief LCDIF driver for i.MX23 and i.MX28
+ *
+ * The LCDIF supports four modes of operation:
+ * - MPU interface (to drive smart displays) -> not supported yet
+ * - VSYNC interface (like MPU interface plus Vsync) -> not supported yet
+ * - Dotclock interface (to drive LC displays with RGB data and sync signals)
+ * - DVI (to drive ITU-R BT656) -> not supported yet
+ *
+ * This driver depends on a correct setup of the pins used for this purpose
+ * (platform specific).
+ *
+ * For the developer: don't forget to set the data bus width to the display
+ * in the imx_fb_videomode structure; otherwise you will end up with wrong
+ * colours. If you are fighting jitter you can vary the clock delay, an
+ * i.MX28-only feature that can be adjusted from 2 ns to 8 ns in 2 ns steps.
+ * Give the required value in the imx_fb_videomode structure.
+ */
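+
+/*
+ * A minimal board-side sketch (hypothetical values; the fields follow
+ * struct mxsfb_platform_data as consumed by this driver, the mode
+ * numbers are examples only):
+ *
+ *	static struct fb_videomode example_mode = {
+ *		.name = "example-wvga",
+ *		.refresh = 60,
+ *		.xres = 800,
+ *		.yres = 480,
+ *	};
+ *
+ *	static struct mxsfb_platform_data example_pdata = {
+ *		.mode_list = &example_mode,
+ *		.mode_count = 1,
+ *		.default_bpp = 16,
+ *		.ld_intf_width = STMLCDIF_16BIT,
+ *		.dotclk_delay = 0,
+ *	};
+ */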
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <mach/mxsfb.h>
+
+#define REG_SET 4
+#define REG_CLR 8
+
+#define LCDC_CTRL 0x00
+#define LCDC_CTRL1 0x10
+#define LCDC_V4_CTRL2 0x20
+#define LCDC_V3_TRANSFER_COUNT 0x20
+#define LCDC_V4_TRANSFER_COUNT 0x30
+#define LCDC_V4_CUR_BUF 0x40
+#define LCDC_V4_NEXT_BUF 0x50
+#define LCDC_V3_CUR_BUF 0x30
+#define LCDC_V3_NEXT_BUF 0x40
+#define LCDC_TIMING 0x60
+#define LCDC_VDCTRL0 0x70
+#define LCDC_VDCTRL1 0x80
+#define LCDC_VDCTRL2 0x90
+#define LCDC_VDCTRL3 0xa0
+#define LCDC_VDCTRL4 0xb0
+#define LCDC_DVICTRL0 0xc0
+#define LCDC_DVICTRL1 0xd0
+#define LCDC_DVICTRL2 0xe0
+#define LCDC_DVICTRL3 0xf0
+#define LCDC_DVICTRL4 0x100
+#define LCDC_V4_DATA 0x180
+#define LCDC_V3_DATA 0x1b0
+#define LCDC_V4_DEBUG0 0x1d0
+#define LCDC_V3_DEBUG0 0x1f0
+
+#define CTRL_SFTRST (1 << 31)
+#define CTRL_CLKGATE (1 << 30)
+#define CTRL_BYPASS_COUNT (1 << 19)
+#define CTRL_VSYNC_MODE (1 << 18)
+#define CTRL_DOTCLK_MODE (1 << 17)
+#define CTRL_DATA_SELECT (1 << 16)
+#define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10)
+#define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3)
+#define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8)
+#define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3)
+#define CTRL_MASTER (1 << 5)
+#define CTRL_DF16 (1 << 3)
+#define CTRL_DF18 (1 << 2)
+#define CTRL_DF24 (1 << 1)
+#define CTRL_RUN (1 << 0)
+
+#define CTRL1_FIFO_CLEAR (1 << 21)
+#define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
+#define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
+
+#define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16)
+#define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff)
+#define TRANSFER_COUNT_SET_HCOUNT(x) ((x) & 0xffff)
+#define TRANSFER_COUNT_GET_HCOUNT(x) ((x) & 0xffff)
+
+
+#define VDCTRL0_ENABLE_PRESENT (1 << 28)
+#define VDCTRL0_VSYNC_ACT_HIGH (1 << 27)
+#define VDCTRL0_HSYNC_ACT_HIGH (1 << 26)
+#define VDCTRL0_DOTCLK_ACT_FAILING (1 << 25)
+#define VDCTRL0_ENABLE_ACT_HIGH (1 << 24)
+#define VDCTRL0_VSYNC_PERIOD_UNIT (1 << 21)
+#define VDCTRL0_VSYNC_PULSE_WIDTH_UNIT (1 << 20)
+#define VDCTRL0_HALF_LINE (1 << 19)
+#define VDCTRL0_HALF_LINE_MODE (1 << 18)
+#define VDCTRL0_SET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
+#define VDCTRL0_GET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
+
+#define VDCTRL2_SET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
+#define VDCTRL2_GET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
+
+#define VDCTRL3_MUX_SYNC_SIGNALS (1 << 29)
+#define VDCTRL3_VSYNC_ONLY (1 << 28)
+#define SET_HOR_WAIT_CNT(x) (((x) & 0xfff) << 16)
+#define GET_HOR_WAIT_CNT(x) (((x) >> 16) & 0xfff)
+#define SET_VERT_WAIT_CNT(x) ((x) & 0xffff)
+#define GET_VERT_WAIT_CNT(x) ((x) & 0xffff)
+
+#define VDCTRL4_SET_DOTCLK_DLY(x) (((x) & 0x7) << 29) /* v4 only */
+#define VDCTRL4_GET_DOTCLK_DLY(x) (((x) >> 29) & 0x7) /* v4 only */
+#define VDCTRL4_SYNC_SIGNALS_ON (1 << 18)
+#define SET_DOTCLK_H_VALID_DATA_CNT(x) ((x) & 0x3ffff)
+
+#define DEBUG0_HSYNC (1 << 26)
+#define DEBUG0_VSYNC (1 << 25)
+
+#define MIN_XRES 120
+#define MIN_YRES 120
+
+#define RED 0
+#define GREEN 1
+#define BLUE 2
+#define TRANSP 3
+
+enum mxsfb_devtype {
+ MXSFB_V3,
+ MXSFB_V4,
+};
+
+/* CPU dependent register offsets (V3 = i.MX23, V4 = i.MX28) */
+struct mxsfb_devdata {
+ unsigned transfer_count;
+ unsigned cur_buf;
+ unsigned next_buf;
+ unsigned debug0;
+ unsigned hs_wdth_mask;
+ unsigned hs_wdth_shift;
+ unsigned ipversion;
+};
+
+struct mxsfb_info {
+ struct fb_info fb_info;
+ struct platform_device *pdev;
+ struct clk *clk;
+ void __iomem *base; /* registers */
+ unsigned allocated_size;
+ int enabled;
+ unsigned ld_intf_width;
+ unsigned dotclk_delay;
+ const struct mxsfb_devdata *devdata;
+ int mapped;
+};
+
+#define mxsfb_is_v3(host) (host->devdata->ipversion == 3)
+#define mxsfb_is_v4(host) (host->devdata->ipversion == 4)
+
+static const struct mxsfb_devdata mxsfb_devdata[] = {
+ [MXSFB_V3] = {
+ .transfer_count = LCDC_V3_TRANSFER_COUNT,
+ .cur_buf = LCDC_V3_CUR_BUF,
+ .next_buf = LCDC_V3_NEXT_BUF,
+ .debug0 = LCDC_V3_DEBUG0,
+ .hs_wdth_mask = 0xff,
+ .hs_wdth_shift = 24,
+ .ipversion = 3,
+ },
+ [MXSFB_V4] = {
+ .transfer_count = LCDC_V4_TRANSFER_COUNT,
+ .cur_buf = LCDC_V4_CUR_BUF,
+ .next_buf = LCDC_V4_NEXT_BUF,
+ .debug0 = LCDC_V4_DEBUG0,
+ .hs_wdth_mask = 0x3fff,
+ .hs_wdth_shift = 18,
+ .ipversion = 4,
+ },
+};
+
+#define to_imxfb_host(x) (container_of(x, struct mxsfb_info, fb_info))
+
+/* mask and shift depends on architecture */
+static inline u32 set_hsync_pulse_width(struct mxsfb_info *host, unsigned val)
+{
+ return (val & host->devdata->hs_wdth_mask) <<
+ host->devdata->hs_wdth_shift;
+}
+
+static inline u32 get_hsync_pulse_width(struct mxsfb_info *host, unsigned val)
+{
+ return (val >> host->devdata->hs_wdth_shift) &
+ host->devdata->hs_wdth_mask;
+}
+
+static const struct fb_bitfield def_rgb565[] = {
+ [RED] = {
+ .offset = 11,
+ .length = 5,
+ },
+ [GREEN] = {
+ .offset = 5,
+ .length = 6,
+ },
+ [BLUE] = {
+ .offset = 0,
+ .length = 5,
+ },
+ [TRANSP] = { /* no support for transparency */
+ .length = 0,
+ }
+};
+
+static const struct fb_bitfield def_rgb666[] = {
+ [RED] = {
+ .offset = 16,
+ .length = 6,
+ },
+ [GREEN] = {
+ .offset = 8,
+ .length = 6,
+ },
+ [BLUE] = {
+ .offset = 0,
+ .length = 6,
+ },
+ [TRANSP] = { /* no support for transparency */
+ .length = 0,
+ }
+};
+
+static const struct fb_bitfield def_rgb888[] = {
+ [RED] = {
+ .offset = 16,
+ .length = 8,
+ },
+ [GREEN] = {
+ .offset = 8,
+ .length = 8,
+ },
+ [BLUE] = {
+ .offset = 0,
+ .length = 8,
+ },
+ [TRANSP] = { /* no support for transparency */
+ .length = 0,
+ }
+};
+
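+/*
+ * Worked example (RGB565, values from def_rgb565 above): for the red field
+ * (offset 11, length 5), chan_to_field(0xffff, &red) drops the low 11 bits
+ * of the 16-bit channel (0xffff >> 11 == 0x1f) and shifts the result to the
+ * field offset, giving 0xf800 -- the red mask of an RGB565 pixel.
+ */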
+static inline unsigned chan_to_field(unsigned chan, struct fb_bitfield *bf)
+{
+ chan &= 0xffff;
+ chan >>= 16 - bf->length;
+ return chan << bf->offset;
+}
+
+static int mxsfb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ const struct fb_bitfield *rgb = NULL;
+
+ if (var->xres < MIN_XRES)
+ var->xres = MIN_XRES;
+ if (var->yres < MIN_YRES)
+ var->yres = MIN_YRES;
+
+ var->xres_virtual = var->xres;
+
+ var->yres_virtual = var->yres;
+
+ switch (var->bits_per_pixel) {
+ case 16:
+ /* always expect RGB 565 */
+ rgb = def_rgb565;
+ break;
+ case 32:
+ switch (host->ld_intf_width) {
+ case STMLCDIF_8BIT:
+ pr_debug("Unsupported LCD bus width mapping\n");
+ break;
+ case STMLCDIF_16BIT:
+ case STMLCDIF_18BIT:
+ /* 24 bit to 18 bit mapping */
+ rgb = def_rgb666;
+ break;
+ case STMLCDIF_24BIT:
+ /* real 24 bit */
+ rgb = def_rgb888;
+ break;
+ }
+ break;
+ default:
+ pr_debug("Unsupported colour depth: %u\n", var->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ /*
+ * Copy the RGB parameters for this display
+ * from the machine specific parameters.
+ */
+ var->red = rgb[RED];
+ var->green = rgb[GREEN];
+ var->blue = rgb[BLUE];
+ var->transp = rgb[TRANSP];
+
+ return 0;
+}
+
+static void mxsfb_enable_controller(struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ u32 reg;
+
+ dev_dbg(&host->pdev->dev, "%s\n", __func__);
+
+ clk_enable(host->clk);
+ clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U);
+
+ /* if it was disabled, re-enable the mode again */
+ writel(CTRL_DOTCLK_MODE, host->base + LCDC_CTRL + REG_SET);
+
+ /* enable the SYNC signals first, then the DMA engine */
+ reg = readl(host->base + LCDC_VDCTRL4);
+ reg |= VDCTRL4_SYNC_SIGNALS_ON;
+ writel(reg, host->base + LCDC_VDCTRL4);
+
+ writel(CTRL_RUN, host->base + LCDC_CTRL + REG_SET);
+
+ host->enabled = 1;
+}
+
+static void mxsfb_disable_controller(struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ unsigned loop;
+ u32 reg;
+
+ dev_dbg(&host->pdev->dev, "%s\n", __func__);
+
+ /*
+ * Even if we disable the controller here, it will keep running
+ * until its FIFOs have drained
+ */
+ writel(CTRL_DOTCLK_MODE, host->base + LCDC_CTRL + REG_CLR);
+
+ loop = 1000;
+ while (loop) {
+ reg = readl(host->base + LCDC_CTRL);
+ if (!(reg & CTRL_RUN))
+ break;
+ loop--;
+ }
+
+ writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR);
+
+ clk_disable(host->clk);
+
+ host->enabled = 0;
+}
+
+static int mxsfb_set_par(struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ u32 ctrl, vdctrl0, vdctrl4;
+ int line_size, fb_size;
+ int reenable = 0;
+
+ line_size = fb_info->var.xres * (fb_info->var.bits_per_pixel >> 3);
+ fb_size = fb_info->var.yres_virtual * line_size;
+
+ if (fb_size > fb_info->fix.smem_len)
+ return -ENOMEM;
+
+ fb_info->fix.line_length = line_size;
+
+ /*
+ * It seems the controller can't be re-programmed while it is still
+ * running; doing so may lead to shifted pictures (a FIFO issue?).
+ * So first stop the controller and drain its FIFOs.
+ */
+ if (host->enabled) {
+ reenable = 1;
+ mxsfb_disable_controller(fb_info);
+ }
+
+ /* clear the FIFOs */
+ writel(CTRL1_FIFO_CLEAR, host->base + LCDC_CTRL1 + REG_SET);
+
+ ctrl = CTRL_BYPASS_COUNT | CTRL_MASTER |
+ CTRL_SET_BUS_WIDTH(host->ld_intf_width);
+
+ switch (fb_info->var.bits_per_pixel) {
+ case 16:
+ dev_dbg(&host->pdev->dev, "Setting up RGB565 mode\n");
+ ctrl |= CTRL_SET_WORD_LENGTH(0);
+ writel(CTRL1_SET_BYTE_PACKAGING(0xf), host->base + LCDC_CTRL1);
+ break;
+ case 32:
+ dev_dbg(&host->pdev->dev, "Setting up RGB888/666 mode\n");
+ ctrl |= CTRL_SET_WORD_LENGTH(3);
+ switch (host->ld_intf_width) {
+ case STMLCDIF_8BIT:
+ dev_dbg(&host->pdev->dev,
+ "Unsupported LCD bus width mapping\n");
+ return -EINVAL;
+ case STMLCDIF_16BIT:
+ case STMLCDIF_18BIT:
+ /* 24 bit to 18 bit mapping */
+ ctrl |= CTRL_DF24; /* ignore the upper 2 bits in
+ * each colour component
+ */
+ break;
+ case STMLCDIF_24BIT:
+ /* real 24 bit */
+ break;
+ }
+ /* do not use packed pixels: one pixel per 32-bit word instead */
+ writel(CTRL1_SET_BYTE_PACKAGING(0x7), host->base + LCDC_CTRL1);
+ break;
+ default:
+ dev_dbg(&host->pdev->dev, "Unhandled color depth of %u\n",
+ fb_info->var.bits_per_pixel);
+ return -EINVAL;
+ }
+
+ writel(ctrl, host->base + LCDC_CTRL);
+
+ writel(TRANSFER_COUNT_SET_VCOUNT(fb_info->var.yres) |
+ TRANSFER_COUNT_SET_HCOUNT(fb_info->var.xres),
+ host->base + host->devdata->transfer_count);
+
+ vdctrl0 = VDCTRL0_ENABLE_PRESENT | /* always in DOTCLOCK mode */
+ VDCTRL0_VSYNC_PERIOD_UNIT |
+ VDCTRL0_VSYNC_PULSE_WIDTH_UNIT |
+ VDCTRL0_SET_VSYNC_PULSE_WIDTH(fb_info->var.vsync_len);
+ if (fb_info->var.sync & FB_SYNC_HOR_HIGH_ACT)
+ vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
+ if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT)
+ vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
+ if (fb_info->var.sync & FB_SYNC_DATA_ENABLE_HIGH_ACT)
+ vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
+ if (fb_info->var.sync & FB_SYNC_DOTCLK_FAILING_ACT)
+ vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING;
+
+ writel(vdctrl0, host->base + LCDC_VDCTRL0);
+
+ /* frame length in lines */
+ writel(fb_info->var.upper_margin + fb_info->var.vsync_len +
+ fb_info->var.lower_margin + fb_info->var.yres,
+ host->base + LCDC_VDCTRL1);
+
+ /* line length in units of clocks or pixels */
+ writel(set_hsync_pulse_width(host, fb_info->var.hsync_len) |
+ VDCTRL2_SET_HSYNC_PERIOD(fb_info->var.left_margin +
+ fb_info->var.hsync_len + fb_info->var.right_margin +
+ fb_info->var.xres),
+ host->base + LCDC_VDCTRL2);
+
+ writel(SET_HOR_WAIT_CNT(fb_info->var.left_margin +
+ fb_info->var.hsync_len) |
+ SET_VERT_WAIT_CNT(fb_info->var.upper_margin +
+ fb_info->var.vsync_len),
+ host->base + LCDC_VDCTRL3);
+
+ vdctrl4 = SET_DOTCLK_H_VALID_DATA_CNT(fb_info->var.xres);
+ if (mxsfb_is_v4(host))
+ vdctrl4 |= VDCTRL4_SET_DOTCLK_DLY(host->dotclk_delay);
+ writel(vdctrl4, host->base + LCDC_VDCTRL4);
+
+ writel(fb_info->fix.smem_start +
+ fb_info->fix.line_length * fb_info->var.yoffset,
+ host->base + host->devdata->next_buf);
+
+ if (reenable)
+ mxsfb_enable_controller(fb_info);
+
+ return 0;
+}
+
+static int mxsfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *fb_info)
+{
+ unsigned int val;
+ int ret = -EINVAL;
+
+ /*
+ * If greyscale is true, then we convert the RGB value
+ * to greyscale no matter what visual we are using.
+ */
+ if (fb_info->var.grayscale)
+ red = green = blue = (19595 * red + 38470 * green +
+ 7471 * blue) >> 16;
+
+ switch (fb_info->fix.visual) {
+ case FB_VISUAL_TRUECOLOR:
+ /*
+ * 12 or 16-bit True Colour. We encode the RGB value
+ * according to the RGB bitfield information.
+ */
+ if (regno < 16) {
+ u32 *pal = fb_info->pseudo_palette;
+
+ val = chan_to_field(red, &fb_info->var.red);
+ val |= chan_to_field(green, &fb_info->var.green);
+ val |= chan_to_field(blue, &fb_info->var.blue);
+
+ pal[regno] = val;
+ ret = 0;
+ }
+ break;
+
+ case FB_VISUAL_STATIC_PSEUDOCOLOR:
+ case FB_VISUAL_PSEUDOCOLOR:
+ break;
+ }
+
+ return ret;
+}
+
+static int mxsfb_blank(int blank, struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+
+ switch (blank) {
+ case FB_BLANK_POWERDOWN:
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_NORMAL:
+ if (host->enabled)
+ mxsfb_disable_controller(fb_info);
+ break;
+
+ case FB_BLANK_UNBLANK:
+ if (!host->enabled)
+ mxsfb_enable_controller(fb_info);
+ break;
+ }
+ return 0;
+}
+
+static int mxsfb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fb_info)
+{
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ unsigned offset;
+
+ if (var->xoffset != 0)
+ return -EINVAL;
+
+ offset = fb_info->fix.line_length * var->yoffset;
+
+ /* update on next VSYNC */
+ writel(fb_info->fix.smem_start + offset,
+ host->base + host->devdata->next_buf);
+
+ return 0;
+}
+
+static struct fb_ops mxsfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = mxsfb_check_var,
+ .fb_set_par = mxsfb_set_par,
+ .fb_setcolreg = mxsfb_setcolreg,
+ .fb_blank = mxsfb_blank,
+ .fb_pan_display = mxsfb_pan_display,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
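+/*
+ * If the boot loader left the controller running (for example to show a
+ * splash screen), reconstruct the active video mode from the hardware
+ * registers instead of blanking the display: read back the timing
+ * registers, rebuild an fb_videomode from them, and, if the active scanout
+ * buffer does not start at the beginning of our framebuffer, move its
+ * contents there so the picture is preserved.
+ */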
+static int __devinit mxsfb_restore_mode(struct mxsfb_info *host)
+{
+ struct fb_info *fb_info = &host->fb_info;
+ unsigned line_count;
+ unsigned period;
+ unsigned long pa, fbsize;
+ int bits_per_pixel, ofs;
+ u32 transfer_count, vdctrl0, vdctrl2, vdctrl3, vdctrl4, ctrl;
+ struct fb_videomode vmode;
+
+ /* Only restore the mode when the controller is running */
+ ctrl = readl(host->base + LCDC_CTRL);
+ if (!(ctrl & CTRL_RUN))
+ return -EINVAL;
+
+ vdctrl0 = readl(host->base + LCDC_VDCTRL0);
+ vdctrl2 = readl(host->base + LCDC_VDCTRL2);
+ vdctrl3 = readl(host->base + LCDC_VDCTRL3);
+ vdctrl4 = readl(host->base + LCDC_VDCTRL4);
+
+ transfer_count = readl(host->base + host->devdata->transfer_count);
+
+ vmode.xres = TRANSFER_COUNT_GET_HCOUNT(transfer_count);
+ vmode.yres = TRANSFER_COUNT_GET_VCOUNT(transfer_count);
+
+ switch (CTRL_GET_WORD_LENGTH(ctrl)) {
+ case 0:
+ bits_per_pixel = 16;
+ break;
+ case 3:
+ bits_per_pixel = 32;
+ break;
+ case 1:
+ default:
+ return -EINVAL;
+ }
+
+ fb_info->var.bits_per_pixel = bits_per_pixel;
+
+ vmode.pixclock = KHZ2PICOS(clk_get_rate(host->clk) / 1000U);
+ vmode.hsync_len = get_hsync_pulse_width(host, vdctrl2);
+ vmode.left_margin = GET_HOR_WAIT_CNT(vdctrl3) - vmode.hsync_len;
+ vmode.right_margin = VDCTRL2_GET_HSYNC_PERIOD(vdctrl2) - vmode.hsync_len -
+ vmode.left_margin - vmode.xres;
+ vmode.vsync_len = VDCTRL0_GET_VSYNC_PULSE_WIDTH(vdctrl0);
+ period = readl(host->base + LCDC_VDCTRL1);
+ vmode.upper_margin = GET_VERT_WAIT_CNT(vdctrl3) - vmode.vsync_len;
+ vmode.lower_margin = period - vmode.vsync_len - vmode.upper_margin - vmode.yres;
+
+ vmode.vmode = FB_VMODE_NONINTERLACED;
+
+ vmode.sync = 0;
+ if (vdctrl0 & VDCTRL0_HSYNC_ACT_HIGH)
+ vmode.sync |= FB_SYNC_HOR_HIGH_ACT;
+ if (vdctrl0 & VDCTRL0_VSYNC_ACT_HIGH)
+ vmode.sync |= FB_SYNC_VERT_HIGH_ACT;
+
+ pr_debug("Reconstructed video mode:\n");
+ pr_debug("%dx%d, hsync: %u left: %u, right: %u, vsync: %u, upper: %u, lower: %u\n",
+ vmode.xres, vmode.yres,
+ vmode.hsync_len, vmode.left_margin, vmode.right_margin,
+ vmode.vsync_len, vmode.upper_margin, vmode.lower_margin);
+ pr_debug("pixclk: %ldkHz\n", PICOS2KHZ(vmode.pixclock));
+
+ fb_add_videomode(&vmode, &fb_info->modelist);
+
+ host->ld_intf_width = CTRL_GET_BUS_WIDTH(ctrl);
+ host->dotclk_delay = VDCTRL4_GET_DOTCLK_DLY(vdctrl4);
+
+ fb_info->fix.line_length = vmode.xres * (bits_per_pixel >> 3);
+
+ pa = readl(host->base + host->devdata->cur_buf);
+ fbsize = fb_info->fix.line_length * vmode.yres;
+ if (pa < fb_info->fix.smem_start)
+ return -EINVAL;
+ if (pa + fbsize > fb_info->fix.smem_start + fb_info->fix.smem_len)
+ return -EINVAL;
+ ofs = pa - fb_info->fix.smem_start;
+ if (ofs) {
+ memmove(fb_info->screen_base, fb_info->screen_base + ofs, fbsize);
+ writel(fb_info->fix.smem_start, host->base + host->devdata->next_buf);
+ }
+
+ line_count = fb_info->fix.smem_len / fb_info->fix.line_length;
+ fb_info->fix.ypanstep = 1;
+
+ clk_enable(host->clk);
+ host->enabled = 1;
+
+ return 0;
+}
+
+static int __devinit mxsfb_init_fbinfo(struct mxsfb_info *host)
+{
+ struct fb_info *fb_info = &host->fb_info;
+ struct fb_var_screeninfo *var = &fb_info->var;
+ struct mxsfb_platform_data *pdata = host->pdev->dev.platform_data;
+ dma_addr_t fb_phys;
+ void *fb_virt;
+ unsigned fb_size = pdata->fb_size;
+
+ fb_info->fbops = &mxsfb_ops;
+ fb_info->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST;
+ strlcpy(fb_info->fix.id, "mxs", sizeof(fb_info->fix.id));
+ fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
+ fb_info->fix.ypanstep = 1;
+ fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
+ fb_info->fix.accel = FB_ACCEL_NONE;
+
+ var->bits_per_pixel = pdata->default_bpp ? pdata->default_bpp : 16;
+ var->nonstd = 0;
+ var->activate = FB_ACTIVATE_NOW;
+ var->accel_flags = 0;
+ var->vmode = FB_VMODE_NONINTERLACED;
+
+ host->dotclk_delay = pdata->dotclk_delay;
+ host->ld_intf_width = pdata->ld_intf_width;
+
+ /* Memory allocation for framebuffer */
+ if (pdata->fb_phys) {
+ if (!fb_size)
+ return -EINVAL;
+
+ fb_phys = pdata->fb_phys;
+
+ if (!request_mem_region(fb_phys, fb_size, host->pdev->name))
+ return -ENOMEM;
+
+ fb_virt = ioremap(fb_phys, fb_size);
+ if (!fb_virt) {
+ release_mem_region(fb_phys, fb_size);
+ return -ENOMEM;
+ }
+ host->mapped = 1;
+ } else {
+ if (!fb_size)
+ fb_size = SZ_2M; /* default */
+ fb_virt = alloc_pages_exact(fb_size, GFP_DMA);
+ if (!fb_virt)
+ return -ENOMEM;
+
+ fb_phys = virt_to_phys(fb_virt);
+ }
+
+ fb_info->fix.smem_start = fb_phys;
+ fb_info->screen_base = fb_virt;
+ fb_info->screen_size = fb_info->fix.smem_len = fb_size;
+
+ if (mxsfb_restore_mode(host))
+ memset(fb_virt, 0, fb_size);
+
+ return 0;
+}
+
+static void __devexit mxsfb_free_videomem(struct mxsfb_info *host)
+{
+ struct fb_info *fb_info = &host->fb_info;
+
+ if (host->mapped) {
+ iounmap(fb_info->screen_base);
+ release_mem_region(fb_info->fix.smem_start,
+ fb_info->screen_size);
+ } else {
+ free_pages_exact(fb_info->screen_base, fb_info->fix.smem_len);
+ }
+}
+
+static int __devinit mxsfb_probe(struct platform_device *pdev)
+{
+ struct mxsfb_platform_data *pdata = pdev->dev.platform_data;
+ struct resource *res;
+ struct mxsfb_info *host;
+ struct fb_info *fb_info;
+ struct fb_modelist *modelist;
+ int i, ret;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platformdata. Giving up\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Cannot get memory IO resource\n");
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name))
+ return -EBUSY;
+
+ fb_info = framebuffer_alloc(sizeof(struct mxsfb_info), &pdev->dev);
+ if (!fb_info) {
+ dev_err(&pdev->dev, "Failed to allocate fbdev\n");
+ ret = -ENOMEM;
+ goto error_alloc_info;
+ }
+
+ host = to_imxfb_host(fb_info);
+
+ host->base = ioremap(res->start, resource_size(res));
+ if (!host->base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto error_ioremap;
+ }
+
+ host->pdev = pdev;
+ platform_set_drvdata(pdev, host);
+
+ host->devdata = &mxsfb_devdata[pdev->id_entry->driver_data];
+
+ host->clk = clk_get(&host->pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto error_getclock;
+ }
+
+ fb_info->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
+ if (!fb_info->pseudo_palette) {
+ ret = -ENOMEM;
+ goto error_pseudo_palette;
+ }
+
+ INIT_LIST_HEAD(&fb_info->modelist);
+
+ ret = mxsfb_init_fbinfo(host);
+ if (ret != 0)
+ goto error_init_fb;
+
+ for (i = 0; i < pdata->mode_count; i++)
+ fb_add_videomode(&pdata->mode_list[i], &fb_info->modelist);
+
+ modelist = list_first_entry(&fb_info->modelist,
+ struct fb_modelist, list);
+ fb_videomode_to_var(&fb_info->var, &modelist->mode);
+
+ /* init the color fields */
+ mxsfb_check_var(&fb_info->var, fb_info);
+
+ platform_set_drvdata(pdev, fb_info);
+
+ ret = register_framebuffer(fb_info);
+ if (ret != 0) {
+ dev_err(&pdev->dev,"Failed to register framebuffer\n");
+ goto error_register;
+ }
+
+ if (!host->enabled) {
+ writel(0, host->base + LCDC_CTRL);
+ mxsfb_set_par(fb_info);
+ mxsfb_enable_controller(fb_info);
+ }
+
+ dev_info(&pdev->dev, "initialized\n");
+
+ return 0;
+
+error_register:
+ if (host->enabled)
+ clk_disable(host->clk);
+ fb_destroy_modelist(&fb_info->modelist);
+error_init_fb:
+ kfree(fb_info->pseudo_palette);
+error_pseudo_palette:
+ clk_put(host->clk);
+error_getclock:
+ iounmap(host->base);
+error_ioremap:
+ framebuffer_release(fb_info);
+error_alloc_info:
+ release_mem_region(res->start, resource_size(res));
+
+ return ret;
+}
+
+static int __devexit mxsfb_remove(struct platform_device *pdev)
+{
+ struct fb_info *fb_info = platform_get_drvdata(pdev);
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (host->enabled)
+ mxsfb_disable_controller(fb_info);
+
+ unregister_framebuffer(fb_info);
+ kfree(fb_info->pseudo_palette);
+ mxsfb_free_videomem(host);
+ iounmap(host->base);
+ clk_put(host->clk);
+
+ framebuffer_release(fb_info);
+ release_mem_region(res->start, resource_size(res));
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_device_id mxsfb_devtype[] = {
+ {
+ .name = "imx23-fb",
+ .driver_data = MXSFB_V3,
+ }, {
+ .name = "imx28-fb",
+ .driver_data = MXSFB_V4,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
+
+static struct platform_driver mxsfb_driver = {
+ .probe = mxsfb_probe,
+ .remove = __devexit_p(mxsfb_remove),
+ .id_table = mxsfb_devtype,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init mxsfb_init(void)
+{
+ return platform_driver_register(&mxsfb_driver);
+}
+
+static void __exit mxsfb_exit(void)
+{
+ platform_driver_unregister(&mxsfb_driver);
+}
+
+module_init(mxsfb_init);
+module_exit(mxsfb_exit);
+
+MODULE_DESCRIPTION("Freescale mxs framebuffer driver");
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
index 083c8fe53e2..15e7f1912af 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/omap/Kconfig
@@ -5,13 +5,18 @@ config FB_OMAP
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select TWL4030_CORE if MACH_OMAP_2430SDP
help
Frame buffer driver for OMAP based boards.
config FB_OMAP_LCD_VGA
bool "Use LCD in VGA mode"
depends on MACH_OMAP_3430SDP || MACH_OMAP_LDP
-
+ help
+ Set the LCD resolution to VGA (640 x 480).
+ Without this option the default resolution is
+ QVGA (320 x 240). See drivers/video/omap/lcd_ldp.c
+ for the LCD driver code.
choice
depends on FB_OMAP && MACH_OVERO
prompt "Screen resolution"
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index 940cab394c2..d18ad6b2372 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -9,6 +9,12 @@ config PANEL_GENERIC_DPI
Supports LCD Panel used in TI SDP3430 and EVM boards,
OMAP3517 EVM boards and CM-T35.
+config PANEL_LGPHILIPS_LB035Q02
+ tristate "LG.Philips LB035Q02 LCD Panel"
+ depends on OMAP2_DSS && SPI
+ help
+ LCD Panel used on the Gumstix Overo Palo35
+
config PANEL_SHARP_LS037V7DW01
tristate "Sharp LS037V7DW01 LCD Panel"
depends on OMAP2_DSS
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
index 861f0255ec6..0f601ab3abf 100644
--- a/drivers/video/omap2/displays/Makefile
+++ b/drivers/video/omap2/displays/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_PANEL_GENERIC_DPI) += panel-generic-dpi.o
+obj-$(CONFIG_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
obj-$(CONFIG_PANEL_NEC_NL8048HL11_01B) += panel-nec-nl8048hl11-01b.o
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 07eb30ee59c..e5691dd55d4 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -33,6 +33,8 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/i2c.h>
+#include "../../edid.h"
#include <plat/panel-generic-dpi.h>
@@ -156,6 +158,31 @@ static struct panel_config generic_dpi_panels[] = {
.power_off_delay = 0,
.name = "toppoly_tdo35s",
},
+
+ /* Samsung LTE430WQ-F0C */
+ {
+ {
+ .x_res = 480,
+ .y_res = 272,
+
+ .pixel_clock = 9200,
+
+ .hfp = 8,
+ .hsw = 41,
+ .hbp = 45 - 41,
+
+ .vfp = 4,
+ .vsw = 10,
+ .vbp = 12 - 10,
+ },
+ .acbi = 0x0,
+ .acb = 0x0,
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS,
+ .power_on_delay = 0,
+ .power_off_delay = 0,
+ .name = "samsung_lte430wq_f0c",
+ },
};
struct panel_drv_data {
@@ -331,6 +358,100 @@ static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
return dpi_check_timings(dssdev, timings);
}
+/* i2c / edid support */
+
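+/* 0x50 is the standard 7-bit I2C slave address of a DDC/EDID EEPROM */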
+#define DDC_ADDR 0x50
+
+static int do_probe_ddc_edid(struct i2c_adapter *adapter,
+ unsigned char *buf, int block, int len)
+{
+ unsigned char start = block * EDID_LENGTH;
+ int i;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf,
+ }
+ };
+
+ /* retry up to 3 times to cope with transient time-outs */
+ for (i = 0; i < 3; i++) {
+ if (i2c_transfer(adapter, msgs, 2) == 2)
+ return 0;
+ }
+
+ return -1;
+}
+
+static int generic_dpi_panel_get_edid(struct omap_dss_device *dssdev,
+ u8 *buf, int len)
+{
+ struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+ struct i2c_adapter *adapter;
+ int i;
+ u8 *edid, *new;
+
+ adapter = i2c_get_adapter(panel_data->i2c_bus_num);
+ if (!adapter) {
+ printk(KERN_ERR "Invalid I2C adapter, bus number: %d\n",
+ panel_data->i2c_bus_num);
+ return -EINVAL;
+ }
+
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (edid == NULL)
+ return -ENOMEM;
+
+ if (do_probe_ddc_edid(adapter, edid, 0, EDID_LENGTH))
+ goto out;
+
+ /* if there are extensions, probe more */
+ if (edid[0x7e] != 0) {
+ new = krealloc(edid, (edid[0x7e] + 1) * EDID_LENGTH,
+ GFP_KERNEL);
+ if (!new)
+ goto out;
+ edid = new;
+
+ for (i = 1; i <= edid[0x7e]; i++) {
+ if (do_probe_ddc_edid(adapter,
+ edid + i * EDID_LENGTH,
+ i, EDID_LENGTH))
+ goto out;
+ }
+ }
+
+ if (edid) {
+ memcpy(buf, edid, len);
+ kfree(edid);
+ return 0;
+ }
+
+out:
+ kfree(edid);
+ return -EINVAL;
+}
+
+static bool generic_dpi_panel_is_detected(struct omap_dss_device *dssdev)
+{
+ struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+ struct i2c_adapter *adapter;
+ unsigned char out;
+
+ adapter = i2c_get_adapter(panel_data->i2c_bus_num);
+ if (!adapter)
+ return omapdss_default_is_detected(dssdev);
+
+ return (do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+
static struct omap_dss_driver dpi_driver = {
.probe = generic_dpi_panel_probe,
.remove = generic_dpi_panel_remove,
@@ -344,6 +465,9 @@ static struct omap_dss_driver dpi_driver = {
.get_timings = generic_dpi_panel_get_timings,
.check_timings = generic_dpi_panel_check_timings,
+ .get_edid = generic_dpi_panel_get_edid,
+ .is_detected = generic_dpi_panel_is_detected,
+
.driver = {
.name = "generic_dpi_panel",
.owner = THIS_MODULE,
diff --git a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
new file mode 100644
index 00000000000..271324db243
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
@@ -0,0 +1,279 @@
+/*
+ * LCD panel driver for LG.Philips LB035Q02
+ *
+ * Author: Steve Sakoman <steve@sakoman.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/mutex.h>
+
+#include <plat/display.h>
+
+struct lb035q02_data {
+ struct mutex lock;
+};
+
+static struct omap_video_timings lb035q02_timings = {
+ .x_res = 320,
+ .y_res = 240,
+
+ .pixel_clock = 6500,
+
+ .hsw = 2,
+ .hfp = 20,
+ .hbp = 68,
+
+ .vsw = 2,
+ .vfp = 4,
+ .vbp = 18,
+};
+
+static int lb035q02_panel_power_on(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
+ r = omapdss_dpi_display_enable(dssdev);
+ if (r)
+ goto err0;
+
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r)
+ goto err1;
+ }
+
+ return 0;
+err1:
+ omapdss_dpi_display_disable(dssdev);
+err0:
+ return r;
+}
+
+static void lb035q02_panel_power_off(struct omap_dss_device *dssdev)
+{
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return;
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ omapdss_dpi_display_disable(dssdev);
+}
+
+static int lb035q02_panel_probe(struct omap_dss_device *dssdev)
+{
+ struct lb035q02_data *ld;
+ int r;
+
+ dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS;
+ dssdev->panel.timings = lb035q02_timings;
+
+ ld = kzalloc(sizeof(*ld), GFP_KERNEL);
+ if (!ld) {
+ r = -ENOMEM;
+ goto err;
+ }
+ mutex_init(&ld->lock);
+ dev_set_drvdata(&dssdev->dev, ld);
+ return 0;
+err:
+ return r;
+}
+
+static void lb035q02_panel_remove(struct omap_dss_device *dssdev)
+{
+ struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
+
+ kfree(ld);
+}
+
+static int lb035q02_panel_enable(struct omap_dss_device *dssdev)
+{
+ struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
+ int r;
+
+ mutex_lock(&ld->lock);
+
+ r = lb035q02_panel_power_on(dssdev);
+ if (r)
+ goto err;
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ mutex_unlock(&ld->lock);
+ return 0;
+err:
+ mutex_unlock(&ld->lock);
+ return r;
+}
+
+static void lb035q02_panel_disable(struct omap_dss_device *dssdev)
+{
+ struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
+
+ mutex_lock(&ld->lock);
+
+ lb035q02_panel_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+ mutex_unlock(&ld->lock);
+}
+
+static int lb035q02_panel_suspend(struct omap_dss_device *dssdev)
+{
+ struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
+
+ mutex_lock(&ld->lock);
+
+ lb035q02_panel_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+ mutex_unlock(&ld->lock);
+ return 0;
+}
+
+static int lb035q02_panel_resume(struct omap_dss_device *dssdev)
+{
+ struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
+ int r;
+
+ mutex_lock(&ld->lock);
+
+ r = lb035q02_panel_power_on(dssdev);
+ if (r)
+ goto err;
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ mutex_unlock(&ld->lock);
+ return 0;
+err:
+ mutex_unlock(&ld->lock);
+ return r;
+}
+
+static struct omap_dss_driver lb035q02_driver = {
+ .probe = lb035q02_panel_probe,
+ .remove = lb035q02_panel_remove,
+
+ .enable = lb035q02_panel_enable,
+ .disable = lb035q02_panel_disable,
+ .suspend = lb035q02_panel_suspend,
+ .resume = lb035q02_panel_resume,
+
+ .driver = {
+ .name = "lgphilips_lb035q02_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
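+/*
+ * Register writes go out as two 3-byte SPI transfers: a start byte plus the
+ * 16-bit register index, then a start byte plus the 16-bit value. The 0x70
+ * and 0x72 start bytes below appear to be the controller's index-write and
+ * data-write markers; this is inferred from the code, not from a datasheet.
+ */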
+static int lb035q02_write_reg(struct spi_device *spi, u8 reg, u16 val)
+{
+ struct spi_message msg;
+ struct spi_transfer index_xfer = {
+ .len = 3,
+ .cs_change = 1,
+ };
+ struct spi_transfer value_xfer = {
+ .len = 3,
+ };
+ u8 buffer[16];
+
+ spi_message_init(&msg);
+
+ /* register index */
+ buffer[0] = 0x70;
+ buffer[1] = 0x00;
+ buffer[2] = reg & 0x7f;
+ index_xfer.tx_buf = buffer;
+ spi_message_add_tail(&index_xfer, &msg);
+
+ /* register value */
+ buffer[4] = 0x72;
+ buffer[5] = val >> 8;
+ buffer[6] = val;
+ value_xfer.tx_buf = buffer + 4;
+ spi_message_add_tail(&value_xfer, &msg);
+
+ return spi_sync(spi, &msg);
+}
+
+static void init_lb035q02_panel(struct spi_device *spi)
+{
+ /* Init sequence from page 28 of the lb035q02 spec */
+ lb035q02_write_reg(spi, 0x01, 0x6300);
+ lb035q02_write_reg(spi, 0x02, 0x0200);
+ lb035q02_write_reg(spi, 0x03, 0x0177);
+ lb035q02_write_reg(spi, 0x04, 0x04c7);
+ lb035q02_write_reg(spi, 0x05, 0xffc0);
+ lb035q02_write_reg(spi, 0x06, 0xe806);
+ lb035q02_write_reg(spi, 0x0a, 0x4008);
+ lb035q02_write_reg(spi, 0x0b, 0x0000);
+ lb035q02_write_reg(spi, 0x0d, 0x0030);
+ lb035q02_write_reg(spi, 0x0e, 0x2800);
+ lb035q02_write_reg(spi, 0x0f, 0x0000);
+ lb035q02_write_reg(spi, 0x16, 0x9f80);
+ lb035q02_write_reg(spi, 0x17, 0x0a0f);
+ lb035q02_write_reg(spi, 0x1e, 0x00c1);
+ lb035q02_write_reg(spi, 0x30, 0x0300);
+ lb035q02_write_reg(spi, 0x31, 0x0007);
+ lb035q02_write_reg(spi, 0x32, 0x0000);
+ lb035q02_write_reg(spi, 0x33, 0x0000);
+ lb035q02_write_reg(spi, 0x34, 0x0707);
+ lb035q02_write_reg(spi, 0x35, 0x0004);
+ lb035q02_write_reg(spi, 0x36, 0x0302);
+ lb035q02_write_reg(spi, 0x37, 0x0202);
+ lb035q02_write_reg(spi, 0x3a, 0x0a0d);
+ lb035q02_write_reg(spi, 0x3b, 0x0806);
+}
+
+static int __devinit lb035q02_panel_spi_probe(struct spi_device *spi)
+{
+ init_lb035q02_panel(spi);
+ return omap_dss_register_driver(&lb035q02_driver);
+}
+
+static int __devexit lb035q02_panel_spi_remove(struct spi_device *spi)
+{
+ omap_dss_unregister_driver(&lb035q02_driver);
+ return 0;
+}
+
+static struct spi_driver lb035q02_spi_driver = {
+ .driver = {
+ .name = "lgphilips_lb035q02_panel-spi",
+ .owner = THIS_MODULE,
+ },
+ .probe = lb035q02_panel_spi_probe,
+ .remove = __devexit_p(lb035q02_panel_spi_remove),
+};
+
+static int __init lb035q02_panel_drv_init(void)
+{
+ return spi_register_driver(&lb035q02_spi_driver);
+}
+
+static void __exit lb035q02_panel_drv_exit(void)
+{
+ spi_unregister_driver(&lb035q02_spi_driver);
+}
+
+module_init(lb035q02_panel_drv_init);
+module_exit(lb035q02_panel_drv_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 61026f96ad2..abdfdd81001 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -218,6 +218,8 @@ struct taal_data {
u16 w;
u16 h;
} update_region;
+ int channel;
+
struct delayed_work te_timeout_work;
bool use_dsi_bl;
@@ -257,12 +259,12 @@ static void hw_guard_wait(struct taal_data *td)
}
}
-static int taal_dcs_read_1(u8 dcs_cmd, u8 *data)
+static int taal_dcs_read_1(struct taal_data *td, u8 dcs_cmd, u8 *data)
{
int r;
u8 buf[1];
- r = dsi_vc_dcs_read(TCH, dcs_cmd, buf, 1);
+ r = dsi_vc_dcs_read(td->channel, dcs_cmd, buf, 1);
if (r < 0)
return r;
@@ -272,17 +274,17 @@ static int taal_dcs_read_1(u8 dcs_cmd, u8 *data)
return 0;
}
-static int taal_dcs_write_0(u8 dcs_cmd)
+static int taal_dcs_write_0(struct taal_data *td, u8 dcs_cmd)
{
- return dsi_vc_dcs_write(TCH, &dcs_cmd, 1);
+ return dsi_vc_dcs_write(td->channel, &dcs_cmd, 1);
}
-static int taal_dcs_write_1(u8 dcs_cmd, u8 param)
+static int taal_dcs_write_1(struct taal_data *td, u8 dcs_cmd, u8 param)
{
u8 buf[2];
buf[0] = dcs_cmd;
buf[1] = param;
- return dsi_vc_dcs_write(TCH, buf, 2);
+ return dsi_vc_dcs_write(td->channel, buf, 2);
}
static int taal_sleep_in(struct taal_data *td)
@@ -294,7 +296,7 @@ static int taal_sleep_in(struct taal_data *td)
hw_guard_wait(td);
cmd = DCS_SLEEP_IN;
- r = dsi_vc_dcs_write_nosync(TCH, &cmd, 1);
+ r = dsi_vc_dcs_write_nosync(td->channel, &cmd, 1);
if (r)
return r;
@@ -312,7 +314,7 @@ static int taal_sleep_out(struct taal_data *td)
hw_guard_wait(td);
- r = taal_dcs_write_0(DCS_SLEEP_OUT);
+ r = taal_dcs_write_0(td, DCS_SLEEP_OUT);
if (r)
return r;
@@ -324,30 +326,30 @@ static int taal_sleep_out(struct taal_data *td)
return 0;
}
-static int taal_get_id(u8 *id1, u8 *id2, u8 *id3)
+static int taal_get_id(struct taal_data *td, u8 *id1, u8 *id2, u8 *id3)
{
int r;
- r = taal_dcs_read_1(DCS_GET_ID1, id1);
+ r = taal_dcs_read_1(td, DCS_GET_ID1, id1);
if (r)
return r;
- r = taal_dcs_read_1(DCS_GET_ID2, id2);
+ r = taal_dcs_read_1(td, DCS_GET_ID2, id2);
if (r)
return r;
- r = taal_dcs_read_1(DCS_GET_ID3, id3);
+ r = taal_dcs_read_1(td, DCS_GET_ID3, id3);
if (r)
return r;
return 0;
}
-static int taal_set_addr_mode(u8 rotate, bool mirror)
+static int taal_set_addr_mode(struct taal_data *td, u8 rotate, bool mirror)
{
int r;
u8 mode;
int b5, b6, b7;
- r = taal_dcs_read_1(DCS_READ_MADCTL, &mode);
+ r = taal_dcs_read_1(td, DCS_READ_MADCTL, &mode);
if (r)
return r;
@@ -381,10 +383,11 @@ static int taal_set_addr_mode(u8 rotate, bool mirror)
mode &= ~((1<<7) | (1<<6) | (1<<5));
mode |= (b7 << 7) | (b6 << 6) | (b5 << 5);
- return taal_dcs_write_1(DCS_MEM_ACC_CTRL, mode);
+ return taal_dcs_write_1(td, DCS_MEM_ACC_CTRL, mode);
}
-static int taal_set_update_window(u16 x, u16 y, u16 w, u16 h)
+static int taal_set_update_window(struct taal_data *td,
+ u16 x, u16 y, u16 w, u16 h)
{
int r;
u16 x1 = x;
@@ -399,7 +402,7 @@ static int taal_set_update_window(u16 x, u16 y, u16 w, u16 h)
buf[3] = (x2 >> 8) & 0xff;
buf[4] = (x2 >> 0) & 0xff;
- r = dsi_vc_dcs_write_nosync(TCH, buf, sizeof(buf));
+ r = dsi_vc_dcs_write_nosync(td->channel, buf, sizeof(buf));
if (r)
return r;
@@ -409,11 +412,11 @@ static int taal_set_update_window(u16 x, u16 y, u16 w, u16 h)
buf[3] = (y2 >> 8) & 0xff;
buf[4] = (y2 >> 0) & 0xff;
- r = dsi_vc_dcs_write_nosync(TCH, buf, sizeof(buf));
+ r = dsi_vc_dcs_write_nosync(td->channel, buf, sizeof(buf));
if (r)
return r;
- dsi_vc_send_bta_sync(TCH);
+ dsi_vc_send_bta_sync(td->channel);
return r;
}
@@ -439,7 +442,7 @@ static int taal_bl_update_status(struct backlight_device *dev)
if (td->use_dsi_bl) {
if (td->enabled) {
dsi_bus_lock();
- r = taal_dcs_write_1(DCS_BRIGHTNESS, level);
+ r = taal_dcs_write_1(td, DCS_BRIGHTNESS, level);
dsi_bus_unlock();
} else {
r = 0;
@@ -502,7 +505,7 @@ static ssize_t taal_num_errors_show(struct device *dev,
if (td->enabled) {
dsi_bus_lock();
- r = taal_dcs_read_1(DCS_READ_NUM_ERRORS, &errors);
+ r = taal_dcs_read_1(td, DCS_READ_NUM_ERRORS, &errors);
dsi_bus_unlock();
} else {
r = -ENODEV;
@@ -528,7 +531,7 @@ static ssize_t taal_hw_revision_show(struct device *dev,
if (td->enabled) {
dsi_bus_lock();
- r = taal_get_id(&id1, &id2, &id3);
+ r = taal_get_id(td, &id1, &id2, &id3);
dsi_bus_unlock();
} else {
r = -ENODEV;
@@ -590,7 +593,7 @@ static ssize_t store_cabc_mode(struct device *dev,
if (td->enabled) {
dsi_bus_lock();
if (!td->cabc_broken)
- taal_dcs_write_1(DCS_WRITE_CABC, i);
+ taal_dcs_write_1(td, DCS_WRITE_CABC, i);
dsi_bus_unlock();
}
@@ -774,14 +777,29 @@ static int taal_probe(struct omap_dss_device *dssdev)
dev_dbg(&dssdev->dev, "Using GPIO TE\n");
}
+ r = omap_dsi_request_vc(dssdev, &td->channel);
+ if (r) {
+ dev_err(&dssdev->dev, "failed to get virtual channel\n");
+ goto err_req_vc;
+ }
+
+ r = omap_dsi_set_vc_id(dssdev, td->channel, TCH);
+ if (r) {
+ dev_err(&dssdev->dev, "failed to set VC_ID\n");
+ goto err_vc_id;
+ }
+
r = sysfs_create_group(&dssdev->dev.kobj, &taal_attr_group);
if (r) {
dev_err(&dssdev->dev, "failed to create sysfs files\n");
- goto err_sysfs;
+ goto err_vc_id;
}
return 0;
-err_sysfs:
+
+err_vc_id:
+ omap_dsi_release_vc(dssdev, td->channel);
+err_req_vc:
if (panel_data->use_ext_te)
free_irq(gpio_to_irq(panel_data->ext_te_gpio), dssdev);
err_irq:
@@ -808,6 +826,7 @@ static void taal_remove(struct omap_dss_device *dssdev)
dev_dbg(&dssdev->dev, "remove\n");
sysfs_remove_group(&dssdev->dev.kobj, &taal_attr_group);
+ omap_dsi_release_vc(dssdev, td->channel);
if (panel_data->use_ext_te) {
int gpio = panel_data->ext_te_gpio;
@@ -846,13 +865,13 @@ static int taal_power_on(struct omap_dss_device *dssdev)
taal_hw_reset(dssdev);
- omapdss_dsi_vc_enable_hs(TCH, false);
+ omapdss_dsi_vc_enable_hs(td->channel, false);
r = taal_sleep_out(td);
if (r)
goto err;
- r = taal_get_id(&id1, &id2, &id3);
+ r = taal_get_id(td, &id1, &id2, &id3);
if (r)
goto err;
@@ -861,30 +880,30 @@ static int taal_power_on(struct omap_dss_device *dssdev)
(id2 == 0x00 || id2 == 0xff || id2 == 0x81))
td->cabc_broken = true;
- r = taal_dcs_write_1(DCS_BRIGHTNESS, 0xff);
+ r = taal_dcs_write_1(td, DCS_BRIGHTNESS, 0xff);
if (r)
goto err;
- r = taal_dcs_write_1(DCS_CTRL_DISPLAY,
+ r = taal_dcs_write_1(td, DCS_CTRL_DISPLAY,
(1<<2) | (1<<5)); /* BL | BCTRL */
if (r)
goto err;
- r = taal_dcs_write_1(DCS_PIXEL_FORMAT, 0x7); /* 24bit/pixel */
+ r = taal_dcs_write_1(td, DCS_PIXEL_FORMAT, 0x7); /* 24bit/pixel */
if (r)
goto err;
- r = taal_set_addr_mode(td->rotate, td->mirror);
+ r = taal_set_addr_mode(td, td->rotate, td->mirror);
if (r)
goto err;
if (!td->cabc_broken) {
- r = taal_dcs_write_1(DCS_WRITE_CABC, td->cabc_mode);
+ r = taal_dcs_write_1(td, DCS_WRITE_CABC, td->cabc_mode);
if (r)
goto err;
}
- r = taal_dcs_write_0(DCS_DISPLAY_ON);
+ r = taal_dcs_write_0(td, DCS_DISPLAY_ON);
if (r)
goto err;
@@ -903,7 +922,7 @@ static int taal_power_on(struct omap_dss_device *dssdev)
td->intro_printed = true;
}
- omapdss_dsi_vc_enable_hs(TCH, true);
+ omapdss_dsi_vc_enable_hs(td->channel, true);
return 0;
err:
@@ -921,7 +940,7 @@ static void taal_power_off(struct omap_dss_device *dssdev)
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
- r = taal_dcs_write_0(DCS_DISPLAY_OFF);
+ r = taal_dcs_write_0(td, DCS_DISPLAY_OFF);
if (!r) {
r = taal_sleep_in(td);
/* HACK: wait a bit so that the message goes through */
@@ -1089,7 +1108,7 @@ static irqreturn_t taal_te_isr(int irq, void *data)
if (old) {
cancel_delayed_work(&td->te_timeout_work);
- r = omap_dsi_update(dssdev, TCH,
+ r = omap_dsi_update(dssdev, td->channel,
td->update_region.x,
td->update_region.y,
td->update_region.w,
@@ -1139,7 +1158,7 @@ static int taal_update(struct omap_dss_device *dssdev,
if (r)
goto err;
- r = taal_set_update_window(x, y, w, h);
+ r = taal_set_update_window(td, x, y, w, h);
if (r)
goto err;
@@ -1153,7 +1172,7 @@ static int taal_update(struct omap_dss_device *dssdev,
msecs_to_jiffies(250));
atomic_set(&td->do_update, 1);
} else {
- r = omap_dsi_update(dssdev, TCH, x, y, w, h,
+ r = omap_dsi_update(dssdev, td->channel, x, y, w, h,
taal_framedone_cb, dssdev);
if (r)
goto err;
@@ -1191,9 +1210,9 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
int r;
if (enable)
- r = taal_dcs_write_1(DCS_TEAR_ON, 0);
+ r = taal_dcs_write_1(td, DCS_TEAR_ON, 0);
else
- r = taal_dcs_write_0(DCS_TEAR_OFF);
+ r = taal_dcs_write_0(td, DCS_TEAR_OFF);
if (!panel_data->use_ext_te)
omapdss_dsi_enable_te(dssdev, enable);
@@ -1263,7 +1282,7 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
dsi_bus_lock();
if (td->enabled) {
- r = taal_set_addr_mode(rotate, td->mirror);
+ r = taal_set_addr_mode(td, rotate, td->mirror);
if (r)
goto err;
}
@@ -1306,7 +1325,7 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
dsi_bus_lock();
if (td->enabled) {
- r = taal_set_addr_mode(td->rotate, enable);
+ r = taal_set_addr_mode(td, td->rotate, enable);
if (r)
goto err;
}
@@ -1350,13 +1369,13 @@ static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
dsi_bus_lock();
- r = taal_dcs_read_1(DCS_GET_ID1, &id1);
+ r = taal_dcs_read_1(td, DCS_GET_ID1, &id1);
if (r)
goto err2;
- r = taal_dcs_read_1(DCS_GET_ID2, &id2);
+ r = taal_dcs_read_1(td, DCS_GET_ID2, &id2);
if (r)
goto err2;
- r = taal_dcs_read_1(DCS_GET_ID3, &id3);
+ r = taal_dcs_read_1(td, DCS_GET_ID3, &id3);
if (r)
goto err2;
@@ -1404,9 +1423,9 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
else
plen = 2;
- taal_set_update_window(x, y, w, h);
+ taal_set_update_window(td, x, y, w, h);
- r = dsi_vc_set_max_rx_packet_size(TCH, plen);
+ r = dsi_vc_set_max_rx_packet_size(td->channel, plen);
if (r)
goto err2;
@@ -1414,7 +1433,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
u8 dcs_cmd = first ? 0x2e : 0x3e;
first = 0;
- r = dsi_vc_dcs_read(TCH, dcs_cmd,
+ r = dsi_vc_dcs_read(td->channel, dcs_cmd,
buf + buf_used, size - buf_used);
if (r < 0) {
@@ -1440,7 +1459,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
r = buf_used;
err3:
- dsi_vc_set_max_rx_packet_size(TCH, 1);
+ dsi_vc_set_max_rx_packet_size(td->channel, 1);
err2:
dsi_bus_unlock();
err1:
@@ -1466,7 +1485,7 @@ static void taal_esd_work(struct work_struct *work)
dsi_bus_lock();
- r = taal_dcs_read_1(DCS_RDDSDR, &state1);
+ r = taal_dcs_read_1(td, DCS_RDDSDR, &state1);
if (r) {
dev_err(&dssdev->dev, "failed to read Taal status\n");
goto err;
@@ -1479,7 +1498,7 @@ static void taal_esd_work(struct work_struct *work)
goto err;
}
- r = taal_dcs_read_1(DCS_RDDSDR, &state2);
+ r = taal_dcs_read_1(td, DCS_RDDSDR, &state2);
if (r) {
dev_err(&dssdev->dev, "failed to read Taal status\n");
goto err;
@@ -1495,7 +1514,7 @@ static void taal_esd_work(struct work_struct *work)
/* Self-diagnostics result is also shown on TE GPIO line. We need
* to re-enable TE after self diagnostics */
if (td->te_enabled && panel_data->use_ext_te) {
- r = taal_dcs_write_1(DCS_TEAR_ON, 0);
+ r = taal_dcs_write_1(td, DCS_TEAR_ON, 0);
if (r)
goto err;
}
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 43b64403eaa..5c299d5552c 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -1,8 +1,8 @@
menuconfig OMAP2_DSS
- tristate "OMAP2/3 Display Subsystem support (EXPERIMENTAL)"
- depends on ARCH_OMAP2 || ARCH_OMAP3
+ tristate "OMAP2+ Display Subsystem support (EXPERIMENTAL)"
+ depends on ARCH_OMAP2PLUS
help
- OMAP2/3 Display Subsystem support.
+ OMAP2+ Display Subsystem support.
if OMAP2_DSS
@@ -56,10 +56,19 @@ config OMAP2_DSS_RFBI
config OMAP2_DSS_VENC
bool "VENC support"
+ depends on BROKEN
default y
help
OMAP Video Encoder support for S-Video and composite TV-out.
+config OMAP4_DSS_HDMI
+ bool "HDMI support"
+ depends on ARCH_OMAP4
+ default y
+ help
+ HDMI interface support. This adds the High-Definition Multimedia
+ Interface. See http://www.hdmi.org/ for the HDMI specification.
+
config OMAP2_DSS_SDI
bool "SDI support"
depends on ARCH_OMAP3
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 7db17b5e570..10d9d3bb3e2 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -5,3 +5,5 @@ omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
+omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi.o \
+ hdmi_omap4_panel.o
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 8e89f604928..7d1b9fecc43 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -34,332 +34,26 @@
#include <linux/regulator/consumer.h>
#include <plat/display.h>
-#include <plat/clock.h>
#include "dss.h"
#include "dss_features.h"
static struct {
struct platform_device *pdev;
- int ctx_id;
-
- struct clk *dss_ick;
- struct clk *dss1_fck;
- struct clk *dss2_fck;
- struct clk *dss_54m_fck;
- struct clk *dss_96m_fck;
- unsigned num_clks_enabled;
struct regulator *vdds_dsi_reg;
struct regulator *vdds_sdi_reg;
- struct regulator *vdda_dac_reg;
} core;
-static void dss_clk_enable_all_no_ctx(void);
-static void dss_clk_disable_all_no_ctx(void);
-static void dss_clk_enable_no_ctx(enum dss_clock clks);
-static void dss_clk_disable_no_ctx(enum dss_clock clks);
-
static char *def_disp_name;
module_param_named(def_disp, def_disp_name, charp, 0);
-MODULE_PARM_DESC(def_disp_name, "default display name");
+MODULE_PARM_DESC(def_disp, "default display name");
#ifdef DEBUG
unsigned int dss_debug;
module_param_named(debug, dss_debug, bool, 0644);
#endif
-/* CONTEXT */
-static int dss_get_ctx_id(void)
-{
- struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
- int r;
-
- if (!pdata->get_last_off_on_transaction_id)
- return 0;
- r = pdata->get_last_off_on_transaction_id(&core.pdev->dev);
- if (r < 0) {
- dev_err(&core.pdev->dev, "getting transaction ID failed, "
- "will force context restore\n");
- r = -1;
- }
- return r;
-}
-
-int dss_need_ctx_restore(void)
-{
- int id = dss_get_ctx_id();
-
- if (id < 0 || id != core.ctx_id) {
- DSSDBG("ctx id %d -> id %d\n",
- core.ctx_id, id);
- core.ctx_id = id;
- return 1;
- } else {
- return 0;
- }
-}
-
-static void save_all_ctx(void)
-{
- DSSDBG("save context\n");
-
- dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
-
- dss_save_context();
- dispc_save_context();
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_save_context();
-#endif
-
- dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
-}
-
-static void restore_all_ctx(void)
-{
- DSSDBG("restore context\n");
-
- dss_clk_enable_all_no_ctx();
-
- dss_restore_context();
- dispc_restore_context();
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_restore_context();
-#endif
-
- dss_clk_disable_all_no_ctx();
-}
-
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
-/* CLOCKS */
-static void core_dump_clocks(struct seq_file *s)
-{
- int i;
- struct clk *clocks[5] = {
- core.dss_ick,
- core.dss1_fck,
- core.dss2_fck,
- core.dss_54m_fck,
- core.dss_96m_fck
- };
-
- seq_printf(s, "- CORE -\n");
-
- seq_printf(s, "internal clk count\t\t%u\n", core.num_clks_enabled);
-
- for (i = 0; i < 5; i++) {
- if (!clocks[i])
- continue;
- seq_printf(s, "%-15s\t%lu\t%d\n",
- clocks[i]->name,
- clk_get_rate(clocks[i]),
- clocks[i]->usecount);
- }
-}
-#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */
-
-static int dss_get_clock(struct clk **clock, const char *clk_name)
-{
- struct clk *clk;
-
- clk = clk_get(&core.pdev->dev, clk_name);
-
- if (IS_ERR(clk)) {
- DSSERR("can't get clock %s", clk_name);
- return PTR_ERR(clk);
- }
-
- *clock = clk;
-
- DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));
-
- return 0;
-}
-
-static int dss_get_clocks(void)
-{
- int r;
-
- core.dss_ick = NULL;
- core.dss1_fck = NULL;
- core.dss2_fck = NULL;
- core.dss_54m_fck = NULL;
- core.dss_96m_fck = NULL;
-
- r = dss_get_clock(&core.dss_ick, "ick");
- if (r)
- goto err;
-
- r = dss_get_clock(&core.dss1_fck, "dss1_fck");
- if (r)
- goto err;
-
- r = dss_get_clock(&core.dss2_fck, "dss2_fck");
- if (r)
- goto err;
-
- r = dss_get_clock(&core.dss_54m_fck, "tv_fck");
- if (r)
- goto err;
-
- r = dss_get_clock(&core.dss_96m_fck, "video_fck");
- if (r)
- goto err;
-
- return 0;
-
-err:
- if (core.dss_ick)
- clk_put(core.dss_ick);
- if (core.dss1_fck)
- clk_put(core.dss1_fck);
- if (core.dss2_fck)
- clk_put(core.dss2_fck);
- if (core.dss_54m_fck)
- clk_put(core.dss_54m_fck);
- if (core.dss_96m_fck)
- clk_put(core.dss_96m_fck);
-
- return r;
-}
-
-static void dss_put_clocks(void)
-{
- if (core.dss_96m_fck)
- clk_put(core.dss_96m_fck);
- clk_put(core.dss_54m_fck);
- clk_put(core.dss1_fck);
- clk_put(core.dss2_fck);
- clk_put(core.dss_ick);
-}
-
-unsigned long dss_clk_get_rate(enum dss_clock clk)
-{
- switch (clk) {
- case DSS_CLK_ICK:
- return clk_get_rate(core.dss_ick);
- case DSS_CLK_FCK1:
- return clk_get_rate(core.dss1_fck);
- case DSS_CLK_FCK2:
- return clk_get_rate(core.dss2_fck);
- case DSS_CLK_54M:
- return clk_get_rate(core.dss_54m_fck);
- case DSS_CLK_96M:
- return clk_get_rate(core.dss_96m_fck);
- }
-
- BUG();
- return 0;
-}
-
-static unsigned count_clk_bits(enum dss_clock clks)
-{
- unsigned num_clks = 0;
-
- if (clks & DSS_CLK_ICK)
- ++num_clks;
- if (clks & DSS_CLK_FCK1)
- ++num_clks;
- if (clks & DSS_CLK_FCK2)
- ++num_clks;
- if (clks & DSS_CLK_54M)
- ++num_clks;
- if (clks & DSS_CLK_96M)
- ++num_clks;
-
- return num_clks;
-}
-
-static void dss_clk_enable_no_ctx(enum dss_clock clks)
-{
- unsigned num_clks = count_clk_bits(clks);
-
- if (clks & DSS_CLK_ICK)
- clk_enable(core.dss_ick);
- if (clks & DSS_CLK_FCK1)
- clk_enable(core.dss1_fck);
- if (clks & DSS_CLK_FCK2)
- clk_enable(core.dss2_fck);
- if (clks & DSS_CLK_54M)
- clk_enable(core.dss_54m_fck);
- if (clks & DSS_CLK_96M)
- clk_enable(core.dss_96m_fck);
-
- core.num_clks_enabled += num_clks;
-}
-
-void dss_clk_enable(enum dss_clock clks)
-{
- bool check_ctx = core.num_clks_enabled == 0;
-
- dss_clk_enable_no_ctx(clks);
-
- if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore())
- restore_all_ctx();
-}
-
-static void dss_clk_disable_no_ctx(enum dss_clock clks)
-{
- unsigned num_clks = count_clk_bits(clks);
-
- if (clks & DSS_CLK_ICK)
- clk_disable(core.dss_ick);
- if (clks & DSS_CLK_FCK1)
- clk_disable(core.dss1_fck);
- if (clks & DSS_CLK_FCK2)
- clk_disable(core.dss2_fck);
- if (clks & DSS_CLK_54M)
- clk_disable(core.dss_54m_fck);
- if (clks & DSS_CLK_96M)
- clk_disable(core.dss_96m_fck);
-
- core.num_clks_enabled -= num_clks;
-}
-
-void dss_clk_disable(enum dss_clock clks)
-{
- if (cpu_is_omap34xx()) {
- unsigned num_clks = count_clk_bits(clks);
-
- BUG_ON(core.num_clks_enabled < num_clks);
-
- if (core.num_clks_enabled == num_clks)
- save_all_ctx();
- }
-
- dss_clk_disable_no_ctx(clks);
-}
-
-static void dss_clk_enable_all_no_ctx(void)
-{
- enum dss_clock clks;
-
- clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_96M;
- dss_clk_enable_no_ctx(clks);
-}
-
-static void dss_clk_disable_all_no_ctx(void)
-{
- enum dss_clock clks;
-
- clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_96M;
- dss_clk_disable_no_ctx(clks);
-}
-
-static void dss_clk_disable_all(void)
-{
- enum dss_clock clks;
-
- clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_96M;
- dss_clk_disable(clks);
-}
-
/* REGULATORS */
struct regulator *dss_get_vdds_dsi(void)
@@ -390,32 +84,7 @@ struct regulator *dss_get_vdds_sdi(void)
return reg;
}
-struct regulator *dss_get_vdda_dac(void)
-{
- struct regulator *reg;
-
- if (core.vdda_dac_reg != NULL)
- return core.vdda_dac_reg;
-
- reg = regulator_get(&core.pdev->dev, "vdda_dac");
- if (!IS_ERR(reg))
- core.vdda_dac_reg = reg;
-
- return reg;
-}
-
-/* DEBUGFS */
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
-static void dss_debug_dump_clocks(struct seq_file *s)
-{
- core_dump_clocks(s);
- dss_dump_clocks(s);
- dispc_dump_clocks(s);
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_dump_clocks(s);
-#endif
-}
-
static int dss_debug_show(struct seq_file *s, void *unused)
{
void (*func)(struct seq_file *) = s->private;
@@ -497,7 +166,6 @@ static inline void dss_uninitialize_debugfs(void)
static int omap_dss_probe(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- int skip_init = 0;
int r;
int i;
@@ -508,63 +176,43 @@ static int omap_dss_probe(struct platform_device *pdev)
dss_init_overlay_managers(pdev);
dss_init_overlays(pdev);
- r = dss_get_clocks();
- if (r)
- goto err_clocks;
-
- dss_clk_enable_all_no_ctx();
-
- core.ctx_id = dss_get_ctx_id();
- DSSDBG("initial ctx id %u\n", core.ctx_id);
-
-#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
- /* DISPC_CONTROL */
- if (omap_readl(0x48050440) & 1) /* LCD enabled? */
- skip_init = 1;
-#endif
-
- r = dss_init(skip_init);
+ r = dss_init_platform_driver();
if (r) {
- DSSERR("Failed to initialize DSS\n");
+ DSSERR("Failed to initialize DSS platform driver\n");
goto err_dss;
}
- r = rfbi_init();
- if (r) {
- DSSERR("Failed to initialize rfbi\n");
- goto err_rfbi;
- }
+ /* keep clocks enabled to prevent context saves/restores during init */
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
- r = dpi_init(pdev);
+ r = rfbi_init_platform_driver();
if (r) {
- DSSERR("Failed to initialize dpi\n");
- goto err_dpi;
+ DSSERR("Failed to initialize rfbi platform driver\n");
+ goto err_rfbi;
}
- r = dispc_init();
+ r = dispc_init_platform_driver();
if (r) {
- DSSERR("Failed to initialize dispc\n");
+ DSSERR("Failed to initialize dispc platform driver\n");
goto err_dispc;
}
- r = venc_init(pdev);
+ r = venc_init_platform_driver();
if (r) {
- DSSERR("Failed to initialize venc\n");
+ DSSERR("Failed to initialize venc platform driver\n");
goto err_venc;
}
- if (cpu_is_omap34xx()) {
- r = sdi_init(skip_init);
- if (r) {
- DSSERR("Failed to initialize SDI\n");
- goto err_sdi;
- }
+ r = dsi_init_platform_driver();
+ if (r) {
+ DSSERR("Failed to initialize DSI platform driver\n");
+ goto err_dsi;
+ }
- r = dsi_init(pdev);
- if (r) {
- DSSERR("Failed to initialize DSI\n");
- goto err_dsi;
- }
+ r = hdmi_init_platform_driver();
+ if (r) {
+ DSSERR("Failed to initialize hdmi\n");
+ goto err_hdmi;
}
r = dss_initialize_debugfs();
@@ -589,32 +237,25 @@ static int omap_dss_probe(struct platform_device *pdev)
pdata->default_device = dssdev;
}
- dss_clk_disable_all();
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
return 0;
err_register:
dss_uninitialize_debugfs();
err_debugfs:
- if (cpu_is_omap34xx())
- dsi_exit();
+ hdmi_uninit_platform_driver();
+err_hdmi:
+ dsi_uninit_platform_driver();
err_dsi:
- if (cpu_is_omap34xx())
- sdi_exit();
-err_sdi:
- venc_exit();
+ venc_uninit_platform_driver();
err_venc:
- dispc_exit();
+ dispc_uninit_platform_driver();
err_dispc:
- dpi_exit();
-err_dpi:
- rfbi_exit();
+ rfbi_uninit_platform_driver();
err_rfbi:
- dss_exit();
+ dss_uninit_platform_driver();
err_dss:
- dss_clk_disable_all_no_ctx();
- dss_put_clocks();
-err_clocks:
return r;
}
@@ -623,61 +264,15 @@ static int omap_dss_remove(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
int i;
- int c;
dss_uninitialize_debugfs();
- venc_exit();
- dispc_exit();
- dpi_exit();
- rfbi_exit();
- if (cpu_is_omap34xx()) {
- dsi_exit();
- sdi_exit();
- }
-
- dss_exit();
-
- /* these should be removed at some point */
- c = core.dss_ick->usecount;
- if (c > 0) {
- DSSERR("warning: dss_ick usecount %d, disabling\n", c);
- while (c-- > 0)
- clk_disable(core.dss_ick);
- }
-
- c = core.dss1_fck->usecount;
- if (c > 0) {
- DSSERR("warning: dss1_fck usecount %d, disabling\n", c);
- while (c-- > 0)
- clk_disable(core.dss1_fck);
- }
-
- c = core.dss2_fck->usecount;
- if (c > 0) {
- DSSERR("warning: dss2_fck usecount %d, disabling\n", c);
- while (c-- > 0)
- clk_disable(core.dss2_fck);
- }
-
- c = core.dss_54m_fck->usecount;
- if (c > 0) {
- DSSERR("warning: dss_54m_fck usecount %d, disabling\n", c);
- while (c-- > 0)
- clk_disable(core.dss_54m_fck);
- }
-
- if (core.dss_96m_fck) {
- c = core.dss_96m_fck->usecount;
- if (c > 0) {
- DSSERR("warning: dss_96m_fck usecount %d, disabling\n",
- c);
- while (c-- > 0)
- clk_disable(core.dss_96m_fck);
- }
- }
-
- dss_put_clocks();
+ venc_uninit_platform_driver();
+ dispc_uninit_platform_driver();
+ rfbi_uninit_platform_driver();
+ dsi_uninit_platform_driver();
+ hdmi_uninit_platform_driver();
+ dss_uninit_platform_driver();
dss_uninit_overlays(pdev);
dss_uninit_overlay_managers(pdev);
@@ -839,6 +434,12 @@ int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
if (dssdriver->get_recommended_bpp == NULL)
dssdriver->get_recommended_bpp =
omapdss_default_get_recommended_bpp;
+ if (!dssdriver->check_timings)
+ dssdriver->check_timings = omapdss_default_check_timings;
+ if (!dssdriver->get_timings)
+ dssdriver->get_timings = omapdss_default_get_timings;
+ if (!dssdriver->is_detected)
+ dssdriver->is_detected = omapdss_default_is_detected;
return driver_register(&dssdriver->driver);
}
@@ -896,6 +497,9 @@ int omap_dss_register_device(struct omap_dss_device *dssdev)
dssdev->dev.parent = &dss_bus;
dssdev->dev.release = omap_dss_dev_release;
dev_set_name(&dssdev->dev, "display%d", dev_num++);
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&dssdev->notifier);
+
return device_register(&dssdev->dev);
}
@@ -965,11 +569,6 @@ static void __exit omap_dss_exit(void)
core.vdds_sdi_reg = NULL;
}
- if (core.vdda_dac_reg != NULL) {
- regulator_put(core.vdda_dac_reg);
- core.vdda_dac_reg = NULL;
- }
-
platform_driver_unregister(&omap_dss_driver);
omap_dss_bus_unregister();
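The core.c hunks above replace the direct dss_init()/rfbi_init()/dispc_init()/... calls in omap_dss_probe() with one platform driver per DSS block, unwinding the registrations in reverse order through the err_* labels when one of them fails. A minimal sketch of that register-then-unwind shape follows; the "foo"/"bar" sub-drivers and names are hypothetical stand-ins, not the omapdss symbols.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* hypothetical sub-drivers standing in for dispc/dsi/etc. */
static struct platform_driver foo_driver = {
	.driver = { .name = "example-foo" },
};

static struct platform_driver bar_driver = {
	.driver = { .name = "example-bar" },
};

static int __init example_init(void)
{
	int r;

	r = platform_driver_register(&foo_driver);
	if (r)
		return r;

	r = platform_driver_register(&bar_driver);
	if (r)
		goto err_bar;

	return 0;

err_bar:
	/* unwind in reverse order, mirroring the err_* labels in the patch */
	platform_driver_unregister(&foo_driver);
	return r;
}
module_init(example_init);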
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 9f8c69f16e6..7804779c9da 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -32,6 +32,7 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/hardirq.h>
+#include <linux/interrupt.h>
#include <plat/sram.h>
#include <plat/clock.h>
@@ -42,8 +43,6 @@
#include "dss_features.h"
/* DISPC */
-#define DISPC_BASE 0x48050400
-
#define DISPC_SZ_REGS SZ_4K
struct dispc_reg { u16 idx; };
@@ -74,7 +73,7 @@ struct dispc_reg { u16 idx; };
#define DISPC_TIMING_H(ch) DISPC_REG(ch != 2 ? 0x0064 : 0x0400)
#define DISPC_TIMING_V(ch) DISPC_REG(ch != 2 ? 0x0068 : 0x0404)
#define DISPC_POL_FREQ(ch) DISPC_REG(ch != 2 ? 0x006C : 0x0408)
-#define DISPC_DIVISOR(ch) DISPC_REG(ch != 2 ? 0x0070 : 0x040C)
+#define DISPC_DIVISORo(ch) DISPC_REG(ch != 2 ? 0x0070 : 0x040C)
#define DISPC_GLOBAL_ALPHA DISPC_REG(0x0074)
#define DISPC_SIZE_DIG DISPC_REG(0x0078)
#define DISPC_SIZE_LCD(ch) DISPC_REG(ch != 2 ? 0x007C : 0x03CC)
@@ -129,6 +128,7 @@ struct dispc_reg { u16 idx; };
#define DISPC_VID_PRELOAD(n) DISPC_REG(0x230 + (n)*0x04)
+#define DISPC_DIVISOR DISPC_REG(0x0804)
#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
DISPC_IRQ_OCP_ERR | \
@@ -178,7 +178,9 @@ struct dispc_irq_stats {
};
static struct {
+ struct platform_device *pdev;
void __iomem *base;
+ int irq;
u32 fifo_size[3];
@@ -230,7 +232,7 @@ void dispc_save_context(void)
SR(TIMING_H(0));
SR(TIMING_V(0));
SR(POL_FREQ(0));
- SR(DIVISOR(0));
+ SR(DIVISORo(0));
SR(GLOBAL_ALPHA);
SR(SIZE_DIG);
SR(SIZE_LCD(0));
@@ -242,7 +244,7 @@ void dispc_save_context(void)
SR(TIMING_H(2));
SR(TIMING_V(2));
SR(POL_FREQ(2));
- SR(DIVISOR(2));
+ SR(DIVISORo(2));
SR(CONFIG2);
}
@@ -373,6 +375,9 @@ void dispc_save_context(void)
SR(VID_FIR_COEF_V(1, 7));
SR(VID_PRELOAD(1));
+
+ if (dss_has_feature(FEAT_CORE_CLK_DIV))
+ SR(DIVISOR);
}
void dispc_restore_context(void)
@@ -389,7 +394,7 @@ void dispc_restore_context(void)
RR(TIMING_H(0));
RR(TIMING_V(0));
RR(POL_FREQ(0));
- RR(DIVISOR(0));
+ RR(DIVISORo(0));
RR(GLOBAL_ALPHA);
RR(SIZE_DIG);
RR(SIZE_LCD(0));
@@ -400,7 +405,7 @@ void dispc_restore_context(void)
RR(TIMING_H(2));
RR(TIMING_V(2));
RR(POL_FREQ(2));
- RR(DIVISOR(2));
+ RR(DIVISORo(2));
RR(CONFIG2);
}
@@ -532,6 +537,9 @@ void dispc_restore_context(void)
RR(VID_PRELOAD(1));
+ if (dss_has_feature(FEAT_CORE_CLK_DIV))
+ RR(DIVISOR);
+
/* enable last, because LCD & DIGIT enable are here */
RR(CONTROL);
if (dss_has_feature(FEAT_MGR_LCD2))
@@ -552,9 +560,9 @@ void dispc_restore_context(void)
static inline void enable_clocks(bool enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
}
bool dispc_go_busy(enum omap_channel channel)
@@ -1000,6 +1008,20 @@ void dispc_set_burst_size(enum omap_plane plane,
enable_clocks(0);
}
+void dispc_enable_gamma_table(bool enable)
+{
+ /*
+ * This is partially implemented to support only disabling of
+ * the gamma table.
+ */
+ if (enable) {
+ DSSWARN("Gamma table enabling for TV not yet supported");
+ return;
+ }
+
+ REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
+}
+
static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
{
u32 val;
@@ -1129,10 +1151,16 @@ static void _dispc_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu)
u32 val;
const struct dispc_reg ac0_reg[] = { DISPC_VID_ACCU0(0),
DISPC_VID_ACCU0(1) };
+ u8 hor_start, hor_end, vert_start, vert_end;
BUG_ON(plane == OMAP_DSS_GFX);
- val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0);
+ dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
+ dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
+
+ val = FLD_VAL(vaccu, vert_start, vert_end) |
+ FLD_VAL(haccu, hor_start, hor_end);
+
dispc_write_reg(ac0_reg[plane-1], val);
}
@@ -1141,10 +1169,16 @@ static void _dispc_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu)
u32 val;
const struct dispc_reg ac1_reg[] = { DISPC_VID_ACCU1(0),
DISPC_VID_ACCU1(1) };
+ u8 hor_start, hor_end, vert_start, vert_end;
BUG_ON(plane == OMAP_DSS_GFX);
- val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0);
+ dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
+ dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
+
+ val = FLD_VAL(vaccu, vert_start, vert_end) |
+ FLD_VAL(haccu, hor_start, hor_end);
+
dispc_write_reg(ac1_reg[plane-1], val);
}
@@ -1182,16 +1216,25 @@ static void _dispc_set_scaling(enum omap_plane plane,
_dispc_set_fir(plane, fir_hinc, fir_vinc);
l = dispc_read_reg(dispc_reg_att[plane]);
- l &= ~((0x0f << 5) | (0x3 << 21));
+ /* RESIZEENABLE and VERTICALTAPS */
+ l &= ~((0x3 << 5) | (0x1 << 21));
l |= fir_hinc ? (1 << 5) : 0;
l |= fir_vinc ? (1 << 6) : 0;
+ l |= five_taps ? (1 << 21) : 0;
- l |= hscaleup ? 0 : (1 << 7);
- l |= vscaleup ? 0 : (1 << 8);
+ /* VRESIZECONF and HRESIZECONF */
+ if (dss_has_feature(FEAT_RESIZECONF)) {
+ l &= ~(0x3 << 7);
+ l |= hscaleup ? 0 : (1 << 7);
+ l |= vscaleup ? 0 : (1 << 8);
+ }
- l |= five_taps ? (1 << 21) : 0;
- l |= five_taps ? (1 << 22) : 0;
+ /* LINEBUFFERSPLIT */
+ if (dss_has_feature(FEAT_LINEBUFFERSPLIT)) {
+ l &= ~(0x1 << 22);
+ l |= five_taps ? (1 << 22) : 0;
+ }
dispc_write_reg(dispc_reg_att[plane], l);
@@ -1215,9 +1258,11 @@ static void _dispc_set_scaling(enum omap_plane plane,
static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
bool mirroring, enum omap_color_mode color_mode)
{
+ bool row_repeat = false;
+ int vidrot = 0;
+
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY) {
- int vidrot = 0;
if (mirroring) {
switch (rotation) {
@@ -1251,16 +1296,15 @@ static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
}
}
- REG_FLD_MOD(dispc_reg_att[plane], vidrot, 13, 12);
-
if (rotation == OMAP_DSS_ROT_90 || rotation == OMAP_DSS_ROT_270)
- REG_FLD_MOD(dispc_reg_att[plane], 0x1, 18, 18);
+ row_repeat = true;
else
- REG_FLD_MOD(dispc_reg_att[plane], 0x0, 18, 18);
- } else {
- REG_FLD_MOD(dispc_reg_att[plane], 0, 13, 12);
- REG_FLD_MOD(dispc_reg_att[plane], 0, 18, 18);
+ row_repeat = false;
}
+
+ REG_FLD_MOD(dispc_reg_att[plane], vidrot, 13, 12);
+ if (dss_has_feature(FEAT_ROWREPEATENABLE))
+ REG_FLD_MOD(dispc_reg_att[plane], row_repeat ? 1 : 0, 18, 18);
}
static int color_mode_to_bpp(enum omap_color_mode color_mode)
@@ -2293,7 +2337,7 @@ static void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
BUG_ON(pck_div < 2);
enable_clocks(1);
- dispc_write_reg(DISPC_DIVISOR(channel),
+ dispc_write_reg(DISPC_DIVISORo(channel),
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
enable_clocks(0);
}
@@ -2302,7 +2346,7 @@ static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div,
int *pck_div)
{
u32 l;
- l = dispc_read_reg(DISPC_DIVISOR(channel));
+ l = dispc_read_reg(DISPC_DIVISORo(channel));
*lck_div = FLD_GET(l, 23, 16);
*pck_div = FLD_GET(l, 7, 0);
}
@@ -2311,14 +2355,17 @@ unsigned long dispc_fclk_rate(void)
{
unsigned long r = 0;
- if (dss_get_dispc_clk_source() == DSS_SRC_DSS1_ALWON_FCLK)
- r = dss_clk_get_rate(DSS_CLK_FCK1);
- else
-#ifdef CONFIG_OMAP2_DSS_DSI
- r = dsi_get_dsi1_pll_rate();
-#else
- BUG();
-#endif
+ switch (dss_get_dispc_clk_source()) {
+ case DSS_CLK_SRC_FCK:
+ r = dss_clk_get_rate(DSS_CLK_FCK);
+ break;
+ case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
+ r = dsi_get_pll_hsdiv_dispc_rate();
+ break;
+ default:
+ BUG();
+ }
+
return r;
}
@@ -2328,47 +2375,72 @@ unsigned long dispc_lclk_rate(enum omap_channel channel)
unsigned long r;
u32 l;
- l = dispc_read_reg(DISPC_DIVISOR(channel));
+ l = dispc_read_reg(DISPC_DIVISORo(channel));
lcd = FLD_GET(l, 23, 16);
- r = dispc_fclk_rate();
+ switch (dss_get_lcd_clk_source(channel)) {
+ case DSS_CLK_SRC_FCK:
+ r = dss_clk_get_rate(DSS_CLK_FCK);
+ break;
+ case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
+ r = dsi_get_pll_hsdiv_dispc_rate();
+ break;
+ default:
+ BUG();
+ }
return r / lcd;
}
unsigned long dispc_pclk_rate(enum omap_channel channel)
{
- int lcd, pcd;
+ int pcd;
unsigned long r;
u32 l;
- l = dispc_read_reg(DISPC_DIVISOR(channel));
+ l = dispc_read_reg(DISPC_DIVISORo(channel));
- lcd = FLD_GET(l, 23, 16);
pcd = FLD_GET(l, 7, 0);
- r = dispc_fclk_rate();
+ r = dispc_lclk_rate(channel);
- return r / lcd / pcd;
+ return r / pcd;
}
void dispc_dump_clocks(struct seq_file *s)
{
int lcd, pcd;
+ u32 l;
+ enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
+ enum dss_clk_source lcd_clk_src;
enable_clocks(1);
seq_printf(s, "- DISPC -\n");
- seq_printf(s, "dispc fclk source = %s\n",
- dss_get_dispc_clk_source() == DSS_SRC_DSS1_ALWON_FCLK ?
- "dss1_alwon_fclk" : "dsi1_pll_fclk");
+ seq_printf(s, "dispc fclk source = %s (%s)\n",
+ dss_get_generic_clk_source_name(dispc_clk_src),
+ dss_feat_get_clk_source_name(dispc_clk_src));
seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
+ if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
+ seq_printf(s, "- DISPC-CORE-CLK -\n");
+ l = dispc_read_reg(DISPC_DIVISOR);
+ lcd = FLD_GET(l, 23, 16);
+
+ seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
+ (dispc_fclk_rate()/lcd), lcd);
+ }
seq_printf(s, "- LCD1 -\n");
+ lcd_clk_src = dss_get_lcd_clk_source(OMAP_DSS_CHANNEL_LCD);
+
+ seq_printf(s, "lcd1_clk source = %s (%s)\n",
+ dss_get_generic_clk_source_name(lcd_clk_src),
+ dss_feat_get_clk_source_name(lcd_clk_src));
+
dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD, &lcd, &pcd);
seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
@@ -2378,6 +2450,12 @@ void dispc_dump_clocks(struct seq_file *s)
if (dss_has_feature(FEAT_MGR_LCD2)) {
seq_printf(s, "- LCD2 -\n");
+ lcd_clk_src = dss_get_lcd_clk_source(OMAP_DSS_CHANNEL_LCD2);
+
+ seq_printf(s, "lcd2_clk source = %s (%s)\n",
+ dss_get_generic_clk_source_name(lcd_clk_src),
+ dss_feat_get_clk_source_name(lcd_clk_src));
+
dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD2, &lcd, &pcd);
seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
@@ -2440,7 +2518,7 @@ void dispc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
DUMPREG(DISPC_REVISION);
DUMPREG(DISPC_SYSCONFIG);
@@ -2459,7 +2537,7 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_TIMING_H(0));
DUMPREG(DISPC_TIMING_V(0));
DUMPREG(DISPC_POL_FREQ(0));
- DUMPREG(DISPC_DIVISOR(0));
+ DUMPREG(DISPC_DIVISORo(0));
DUMPREG(DISPC_GLOBAL_ALPHA);
DUMPREG(DISPC_SIZE_DIG);
DUMPREG(DISPC_SIZE_LCD(0));
@@ -2471,7 +2549,7 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_TIMING_H(2));
DUMPREG(DISPC_TIMING_V(2));
DUMPREG(DISPC_POL_FREQ(2));
- DUMPREG(DISPC_DIVISOR(2));
+ DUMPREG(DISPC_DIVISORo(2));
DUMPREG(DISPC_SIZE_LCD(2));
}
@@ -2597,7 +2675,7 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_VID_PRELOAD(0));
DUMPREG(DISPC_VID_PRELOAD(1));
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
#undef DUMPREG
}
@@ -2713,8 +2791,8 @@ int dispc_get_clock_div(enum omap_channel channel,
fck = dispc_fclk_rate();
- cinfo->lck_div = REG_GET(DISPC_DIVISOR(channel), 23, 16);
- cinfo->pck_div = REG_GET(DISPC_DIVISOR(channel), 7, 0);
+ cinfo->lck_div = REG_GET(DISPC_DIVISORo(channel), 23, 16);
+ cinfo->pck_div = REG_GET(DISPC_DIVISORo(channel), 7, 0);
cinfo->lck = fck / cinfo->lck_div;
cinfo->pck = cinfo->lck / cinfo->pck_div;
@@ -2791,6 +2869,9 @@ int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
break;
}
+ if (ret)
+ goto err;
+
_omap_dispc_set_irqs();
spin_unlock_irqrestore(&dispc.irq_lock, flags);
@@ -2866,10 +2947,10 @@ static void print_irq_status(u32 status)
* but we presume they are on because we got an IRQ. However,
* an irq handler may turn the clocks off, so we may not have
* clock later in the function. */
-void dispc_irq_handler(void)
+static irqreturn_t omap_dispc_irq_handler(int irq, void *arg)
{
int i;
- u32 irqstatus;
+ u32 irqstatus, irqenable;
u32 handledirqs = 0;
u32 unhandled_errors;
struct omap_dispc_isr_data *isr_data;
@@ -2878,6 +2959,13 @@ void dispc_irq_handler(void)
spin_lock(&dispc.irq_lock);
irqstatus = dispc_read_reg(DISPC_IRQSTATUS);
+ irqenable = dispc_read_reg(DISPC_IRQENABLE);
+
+ /* IRQ is not for us */
+ if (!(irqstatus & irqenable)) {
+ spin_unlock(&dispc.irq_lock);
+ return IRQ_NONE;
+ }
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
spin_lock(&dispc.irq_stats_lock);
@@ -2929,6 +3017,8 @@ void dispc_irq_handler(void)
}
spin_unlock(&dispc.irq_lock);
+
+ return IRQ_HANDLED;
}
static void dispc_error_worker(struct work_struct *work)
@@ -3253,6 +3343,15 @@ static void _omap_dispc_initial_config(void)
l = FLD_MOD(l, 1, 0, 0); /* AUTOIDLE */
dispc_write_reg(DISPC_SYSCONFIG, l);
+ /* Exclusively enable DISPC_CORE_CLK and set divider to 1 */
+ if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
+ l = dispc_read_reg(DISPC_DIVISOR);
+ /* Use DISPC_DIVISOR.LCD, instead of DISPC_DIVISOR1.LCD */
+ l = FLD_MOD(l, 1, 0, 0);
+ l = FLD_MOD(l, 1, 23, 16);
+ dispc_write_reg(DISPC_DIVISOR, l);
+ }
+
/* FUNCGATED */
if (dss_has_feature(FEAT_FUNCGATED))
REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
@@ -3269,47 +3368,6 @@ static void _omap_dispc_initial_config(void)
dispc_read_plane_fifo_sizes();
}
-int dispc_init(void)
-{
- u32 rev;
-
- spin_lock_init(&dispc.irq_lock);
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spin_lock_init(&dispc.irq_stats_lock);
- dispc.irq_stats.last_reset = jiffies;
-#endif
-
- INIT_WORK(&dispc.error_work, dispc_error_worker);
-
- dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS);
- if (!dispc.base) {
- DSSERR("can't ioremap DISPC\n");
- return -ENOMEM;
- }
-
- enable_clocks(1);
-
- _omap_dispc_initial_config();
-
- _omap_dispc_initialize_irq();
-
- dispc_save_context();
-
- rev = dispc_read_reg(DISPC_REVISION);
- printk(KERN_INFO "OMAP DISPC rev %d.%d\n",
- FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
-
- enable_clocks(0);
-
- return 0;
-}
-
-void dispc_exit(void)
-{
- iounmap(dispc.base);
-}
-
int dispc_enable_plane(enum omap_plane plane, bool enable)
{
DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
@@ -3359,3 +3417,94 @@ int dispc_setup_plane(enum omap_plane plane,
return r;
}
+
+/* DISPC HW IP initialisation */
+static int omap_dispchw_probe(struct platform_device *pdev)
+{
+ u32 rev;
+ int r = 0;
+ struct resource *dispc_mem;
+
+ dispc.pdev = pdev;
+
+ spin_lock_init(&dispc.irq_lock);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock_init(&dispc.irq_stats_lock);
+ dispc.irq_stats.last_reset = jiffies;
+#endif
+
+ INIT_WORK(&dispc.error_work, dispc_error_worker);
+
+ dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
+ if (!dispc_mem) {
+ DSSERR("can't get IORESOURCE_MEM DISPC\n");
+ r = -EINVAL;
+ goto fail0;
+ }
+ dispc.base = ioremap(dispc_mem->start, resource_size(dispc_mem));
+ if (!dispc.base) {
+ DSSERR("can't ioremap DISPC\n");
+ r = -ENOMEM;
+ goto fail0;
+ }
+ dispc.irq = platform_get_irq(dispc.pdev, 0);
+ if (dispc.irq < 0) {
+ DSSERR("platform_get_irq failed\n");
+ r = -ENODEV;
+ goto fail1;
+ }
+
+ r = request_irq(dispc.irq, omap_dispc_irq_handler, IRQF_SHARED,
+ "OMAP DISPC", dispc.pdev);
+ if (r < 0) {
+ DSSERR("request_irq failed\n");
+ goto fail1;
+ }
+
+ enable_clocks(1);
+
+ _omap_dispc_initial_config();
+
+ _omap_dispc_initialize_irq();
+
+ dispc_save_context();
+
+ rev = dispc_read_reg(DISPC_REVISION);
+ dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ enable_clocks(0);
+
+ return 0;
+fail1:
+ iounmap(dispc.base);
+fail0:
+ return r;
+}
+
+static int omap_dispchw_remove(struct platform_device *pdev)
+{
+ free_irq(dispc.irq, dispc.pdev);
+ iounmap(dispc.base);
+ return 0;
+}
+
+static struct platform_driver omap_dispchw_driver = {
+ .probe = omap_dispchw_probe,
+ .remove = omap_dispchw_remove,
+ .driver = {
+ .name = "omapdss_dispc",
+ .owner = THIS_MODULE,
+ },
+};
+
+int dispc_init_platform_driver(void)
+{
+ return platform_driver_register(&omap_dispchw_driver);
+}
+
+void dispc_uninit_platform_driver(void)
+{
+ return platform_driver_unregister(&omap_dispchw_driver);
+}
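The dispc.c side of the patch turns the old exported dispc_irq_handler() into a handler registered with request_irq(..., IRQF_SHARED, ...) and adds an early "not ours" check against DISPC_IRQENABLE. A stripped-down sketch of that shared-IRQ idiom is shown below; the register offsets and device name are placeholders, not the DISPC ones.

#include <linux/interrupt.h>
#include <linux/io.h>

#define EXAMPLE_IRQSTATUS	0x18	/* placeholder offsets */
#define EXAMPLE_IRQENABLE	0x1c

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	void __iomem *base = dev_id;
	u32 status, enable;

	status = readl(base + EXAMPLE_IRQSTATUS);
	enable = readl(base + EXAMPLE_IRQENABLE);

	/* on a shared line, only claim interrupts we actually enabled */
	if (!(status & enable))
		return IRQ_NONE;

	writel(status, base + EXAMPLE_IRQSTATUS);	/* ack */
	/* ... dispatch to registered ISRs here ... */

	return IRQ_HANDLED;
}

/* registration, typically from probe():
 *	request_irq(irq, example_irq_handler, IRQF_SHARED, "example", base);
 */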
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 22dd7a474f7..b1466d8d388 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -25,14 +25,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
-#include <linux/list.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <plat/display.h>
#include "dss.h"
-static LIST_HEAD(display_list);
-
static ssize_t display_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -315,6 +313,32 @@ void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
}
EXPORT_SYMBOL(omapdss_default_get_resolution);
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ *timings = dssdev->panel.timings;
+}
+EXPORT_SYMBOL(omapdss_default_get_timings);
+
+int omapdss_default_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ return memcmp(&dssdev->panel.timings, timings, sizeof(*timings));
+}
+EXPORT_SYMBOL(omapdss_default_check_timings);
+
+bool omapdss_default_is_detected(struct omap_dss_device *dssdev)
+{
+ if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) {
+ /* show resume info for suspended displays */
+ return dssdev->activate_after_resume;
+ } else {
+ return dssdev->state != OMAP_DSS_DISPLAY_DISABLED;
+ }
+}
+EXPORT_SYMBOL(omapdss_default_is_detected);
+
+
void default_get_overlay_fifo_thresholds(enum omap_plane plane,
u32 fifo_size, enum omap_burst_size *burst_size,
u32 *fifo_low, u32 *fifo_high)
@@ -345,6 +369,7 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
return 16;
case OMAP_DISPLAY_TYPE_VENC:
case OMAP_DISPLAY_TYPE_SDI:
+ case OMAP_DISPLAY_TYPE_HDMI:
return 24;
default:
BUG();
@@ -371,6 +396,7 @@ bool dss_use_replication(struct omap_dss_device *dssdev,
case OMAP_DISPLAY_TYPE_DPI:
bpp = dssdev->phy.dpi.data_lines;
break;
+ case OMAP_DISPLAY_TYPE_HDMI:
case OMAP_DISPLAY_TYPE_VENC:
case OMAP_DISPLAY_TYPE_SDI:
bpp = 24;
@@ -396,29 +422,6 @@ void dss_init_device(struct platform_device *pdev,
switch (dssdev->type) {
#ifdef CONFIG_OMAP2_DSS_DPI
case OMAP_DISPLAY_TYPE_DPI:
-#endif
-#ifdef CONFIG_OMAP2_DSS_RFBI
- case OMAP_DISPLAY_TYPE_DBI:
-#endif
-#ifdef CONFIG_OMAP2_DSS_SDI
- case OMAP_DISPLAY_TYPE_SDI:
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
- case OMAP_DISPLAY_TYPE_DSI:
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
- case OMAP_DISPLAY_TYPE_VENC:
-#endif
- break;
- default:
- DSSERR("Support for display '%s' not compiled in.\n",
- dssdev->name);
- return;
- }
-
- switch (dssdev->type) {
-#ifdef CONFIG_OMAP2_DSS_DPI
- case OMAP_DISPLAY_TYPE_DPI:
r = dpi_init_display(dssdev);
break;
#endif
@@ -442,8 +445,13 @@ void dss_init_device(struct platform_device *pdev,
r = dsi_init_display(dssdev);
break;
#endif
+ case OMAP_DISPLAY_TYPE_HDMI:
+ r = hdmi_init_display(dssdev);
+ break;
default:
- BUG();
+ DSSERR("Support for display '%s' not compiled in.\n",
+ dssdev->name);
+ return;
}
if (r) {
@@ -633,3 +641,50 @@ void omap_dss_stop_device(struct omap_dss_device *dssdev)
}
EXPORT_SYMBOL(omap_dss_stop_device);
+/* since omap_dss_update_size can be called in irq context, schedule work from
+ * a workqueue to deliver the notification to the client.
+ */
+struct notify_work {
+ struct work_struct work;
+ struct omap_dss_device *dssdev;
+ enum omap_dss_event evt;
+};
+
+static void notify_worker(struct work_struct *work)
+{
+ struct notify_work *nw =
+ container_of(work, struct notify_work, work);
+ struct omap_dss_device *dssdev = nw->dssdev;
+ blocking_notifier_call_chain(&dssdev->notifier, nw->evt, dssdev);
+ kfree(work);
+}
+
+/**
+ * Called by lower level driver to notify about a change in resolution, etc.
+ */
+void omap_dss_notify(struct omap_dss_device *dssdev, enum omap_dss_event evt)
+{
+ struct notify_work *nw =
+ kmalloc(sizeof(struct notify_work), GFP_KERNEL);
+ if (nw) {
+ INIT_WORK(&nw->work, notify_worker);
+ nw->dssdev = dssdev;
+ nw->evt = evt;
+ schedule_work(&nw->work);
+ }
+}
+EXPORT_SYMBOL(omap_dss_notify);
+
+void omap_dss_add_notify(struct omap_dss_device *dssdev,
+ struct notifier_block *nb)
+{
+ blocking_notifier_chain_register(&dssdev->notifier, nb);
+}
+EXPORT_SYMBOL(omap_dss_add_notify);
+
+void omap_dss_remove_notify(struct omap_dss_device *dssdev,
+ struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&dssdev->notifier, nb);
+}
+EXPORT_SYMBOL(omap_dss_remove_notify);
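display.c now keeps a blocking notifier head per omap_dss_device and defers delivery to a workqueue so omap_dss_notify() can be called from IRQ context. A client wanting those events would follow the usual notifier_block pattern against the omap_dss_add_notify()/omap_dss_remove_notify() helpers added above; the callback below is illustrative only.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <plat/display.h>

static int example_dss_event(struct notifier_block *nb,
		unsigned long evt, void *data)
{
	struct omap_dss_device *dssdev = data;

	/* e.g. re-read dssdev->panel.timings on a size change */
	pr_info("dss event %lu from %s\n", evt, dssdev->name);

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_dss_event,
};

/* after locating the display:
 *	omap_dss_add_notify(dssdev, &example_nb);
 * and on teardown:
 *	omap_dss_remove_notify(dssdev, &example_nb);
 */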
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 75fb0a51543..6b0c0299527 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -57,13 +57,13 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
if (r)
return r;
- dss_select_dispc_clk_source(DSS_SRC_DSI1_PLL_FCLK);
+ dss_select_dispc_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC);
r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
if (r)
return r;
- *fck = dsi_cinfo.dsi1_pll_fclk;
+ *fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
*lck_div = dispc_cinfo.lck_div;
*pck_div = dispc_cinfo.pck_div;
@@ -107,7 +107,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
bool is_tft;
int r = 0;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
dssdev->panel.acbi, dssdev->panel.acb);
@@ -137,7 +137,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
dispc_set_lcd_timings(dssdev->manager->id, t);
err0:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
return r;
}
@@ -145,6 +145,9 @@ static int dpi_basic_init(struct omap_dss_device *dssdev)
{
bool is_tft;
+ if (!dssdev->manager)
+ return -ENODEV;
+
is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
dispc_set_parallel_interface_mode(dssdev->manager->id,
@@ -173,14 +176,14 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
goto err1;
}
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
r = dpi_basic_init(dssdev);
if (r)
goto err2;
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
- dss_clk_enable(DSS_CLK_FCK2);
+ dss_clk_enable(DSS_CLK_SYSCK);
r = dsi_pll_init(dssdev, 0, 1);
if (r)
goto err3;
@@ -199,10 +202,10 @@ err4:
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
dsi_pll_uninit();
err3:
- dss_clk_disable(DSS_CLK_FCK2);
+ dss_clk_disable(DSS_CLK_SYSCK);
#endif
err2:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
err1:
@@ -217,12 +220,12 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
dssdev->manager->disable(dssdev->manager);
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
- dss_select_dispc_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
+ dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
dsi_pll_uninit();
- dss_clk_disable(DSS_CLK_FCK2);
+ dss_clk_disable(DSS_CLK_SYSCK);
#endif
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
@@ -271,7 +274,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
if (r)
return r;
- fck = dsi_cinfo.dsi1_pll_fclk;
+ fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
lck_div = dispc_cinfo.lck_div;
pck_div = dispc_cinfo.pck_div;
}
@@ -303,22 +306,27 @@ int dpi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
- return 0;
-}
+ if (cpu_is_omap34xx() && dpi.vdds_dsi_reg == NULL) {
+ struct regulator *vdds_dsi;
-int dpi_init(struct platform_device *pdev)
-{
- if (cpu_is_omap34xx()) {
- dpi.vdds_dsi_reg = dss_get_vdds_dsi();
- if (IS_ERR(dpi.vdds_dsi_reg)) {
+ vdds_dsi = dss_get_vdds_dsi();
+
+ if (IS_ERR(vdds_dsi)) {
DSSERR("can't get VDDS_DSI regulator\n");
- return PTR_ERR(dpi.vdds_dsi_reg);
+ return PTR_ERR(vdds_dsi);
}
+
+ dpi.vdds_dsi_reg = vdds_dsi;
}
return 0;
}
+int dpi_init(void)
+{
+ return 0;
+}
+
void dpi_exit(void)
{
}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index ddf3a056082..e9b734ccb1a 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -38,12 +38,11 @@
#include <plat/clock.h>
#include "dss.h"
+#include "dss_features.h"
/*#define VERBOSE_IRQ*/
#define DSI_CATCH_MISSING_TE
-#define DSI_BASE 0x4804FC00
-
struct dsi_reg { u16 idx; };
#define DSI_REG(idx) ((const struct dsi_reg) { idx })
@@ -186,13 +185,15 @@ struct dsi_reg { u16 idx; };
#define DSI_DT_RX_SHORT_READ_1 0x21
#define DSI_DT_RX_SHORT_READ_2 0x22
-#define FINT_MAX 2100000
-#define FINT_MIN 750000
-#define REGN_MAX (1 << 7)
-#define REGM_MAX ((1 << 11) - 1)
-#define REGM3_MAX (1 << 4)
-#define REGM4_MAX (1 << 4)
-#define LP_DIV_MAX ((1 << 13) - 1)
+typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
+
+#define DSI_MAX_NR_ISRS 2
+
+struct dsi_isr_data {
+ omap_dsi_isr_t isr;
+ void *arg;
+ u32 mask;
+};
enum fifo_size {
DSI_FIFO_SIZE_0 = 0,
@@ -220,9 +221,17 @@ struct dsi_irq_stats {
unsigned cio_irqs[32];
};
+struct dsi_isr_tables {
+ struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
+ struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
+ struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
+};
+
static struct
{
+ struct platform_device *pdev;
void __iomem *base;
+ int irq;
struct dsi_clock_info current_cinfo;
@@ -232,6 +241,7 @@ static struct
enum dsi_vc_mode mode;
struct omap_dss_device *dssdev;
enum fifo_size fifo_size;
+ int vc_id;
} vc[4];
struct mutex lock;
@@ -239,8 +249,10 @@ static struct
unsigned pll_locked;
- struct completion bta_completion;
- void (*bta_callback)(void);
+ spinlock_t irq_lock;
+ struct dsi_isr_tables isr_tables;
+ /* space for a copy used by the interrupt handler */
+ struct dsi_isr_tables isr_tables_copy;
int update_channel;
struct dsi_update_region update_region;
@@ -275,6 +287,11 @@ static struct
spinlock_t irq_stats_lock;
struct dsi_irq_stats irq_stats;
#endif
+ /* DSI PLL Parameter Ranges */
+ unsigned long regm_max, regn_max;
+ unsigned long regm_dispc_max, regm_dsi_max;
+ unsigned long fint_min, fint_max;
+ unsigned long lpdiv_max;
} dsi;
#ifdef DEBUG
@@ -318,6 +335,11 @@ static bool dsi_bus_is_locked(void)
return dsi.bus_lock.count == 0;
}
+static void dsi_completion_handler(void *data, u32 mask)
+{
+ complete((struct completion *)data);
+}
+
static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
int value)
{
@@ -387,6 +409,9 @@ static void dsi_perf_show(const char *name)
static void print_irq_status(u32 status)
{
+ if (status == 0)
+ return;
+
#ifndef VERBOSE_IRQ
if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
return;
@@ -422,6 +447,9 @@ static void print_irq_status(u32 status)
static void print_irq_status_vc(int channel, u32 status)
{
+ if (status == 0)
+ return;
+
#ifndef VERBOSE_IRQ
if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
return;
@@ -448,6 +476,9 @@ static void print_irq_status_vc(int channel, u32 status)
static void print_irq_status_cio(u32 status)
{
+ if (status == 0)
+ return;
+
printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
#define PIS(x) \
@@ -478,22 +509,33 @@ static void print_irq_status_cio(u32 status)
printk("\n");
}
-static int debug_irq;
-
-/* called from dss */
-void dsi_irq_handler(void)
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+static void dsi_collect_irq_stats(u32 irqstatus, u32 *vcstatus, u32 ciostatus)
{
- u32 irqstatus, vcstatus, ciostatus;
int i;
- irqstatus = dsi_read_reg(DSI_IRQSTATUS);
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
spin_lock(&dsi.irq_stats_lock);
+
dsi.irq_stats.irq_count++;
dss_collect_irq_stats(irqstatus, dsi.irq_stats.dsi_irqs);
+
+ for (i = 0; i < 4; ++i)
+ dss_collect_irq_stats(vcstatus[i], dsi.irq_stats.vc_irqs[i]);
+
+ dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs);
+
+ spin_unlock(&dsi.irq_stats_lock);
+}
+#else
+#define dsi_collect_irq_stats(irqstatus, vcstatus, ciostatus)
#endif
+static int debug_irq;
+
+static void dsi_handle_irq_errors(u32 irqstatus, u32 *vcstatus, u32 ciostatus)
+{
+ int i;
+
if (irqstatus & DSI_IRQ_ERROR_MASK) {
DSSERR("DSI error, irqstatus %x\n", irqstatus);
print_irq_status(irqstatus);
@@ -504,37 +546,88 @@ void dsi_irq_handler(void)
print_irq_status(irqstatus);
}
-#ifdef DSI_CATCH_MISSING_TE
- if (irqstatus & DSI_IRQ_TE_TRIGGER)
- del_timer(&dsi.te_timer);
-#endif
+ for (i = 0; i < 4; ++i) {
+ if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
+ DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
+ i, vcstatus[i]);
+ print_irq_status_vc(i, vcstatus[i]);
+ } else if (debug_irq) {
+ print_irq_status_vc(i, vcstatus[i]);
+ }
+ }
+
+ if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
+ DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
+ print_irq_status_cio(ciostatus);
+ } else if (debug_irq) {
+ print_irq_status_cio(ciostatus);
+ }
+}
+
+static void dsi_call_isrs(struct dsi_isr_data *isr_array,
+ unsigned isr_array_size, u32 irqstatus)
+{
+ struct dsi_isr_data *isr_data;
+ int i;
+
+ for (i = 0; i < isr_array_size; i++) {
+ isr_data = &isr_array[i];
+ if (isr_data->isr && isr_data->mask & irqstatus)
+ isr_data->isr(isr_data->arg, irqstatus);
+ }
+}
+
+static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
+ u32 irqstatus, u32 *vcstatus, u32 ciostatus)
+{
+ int i;
+
+ dsi_call_isrs(isr_tables->isr_table,
+ ARRAY_SIZE(isr_tables->isr_table),
+ irqstatus);
for (i = 0; i < 4; ++i) {
- if ((irqstatus & (1<<i)) == 0)
+ if (vcstatus[i] == 0)
continue;
+ dsi_call_isrs(isr_tables->isr_table_vc[i],
+ ARRAY_SIZE(isr_tables->isr_table_vc[i]),
+ vcstatus[i]);
+ }
- vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i));
+ if (ciostatus != 0)
+ dsi_call_isrs(isr_tables->isr_table_cio,
+ ARRAY_SIZE(isr_tables->isr_table_cio),
+ ciostatus);
+}
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- dss_collect_irq_stats(vcstatus, dsi.irq_stats.vc_irqs[i]);
-#endif
+static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
+{
+ u32 irqstatus, vcstatus[4], ciostatus;
+ int i;
- if (vcstatus & DSI_VC_IRQ_BTA) {
- complete(&dsi.bta_completion);
+ spin_lock(&dsi.irq_lock);
- if (dsi.bta_callback)
- dsi.bta_callback();
- }
+ irqstatus = dsi_read_reg(DSI_IRQSTATUS);
- if (vcstatus & DSI_VC_IRQ_ERROR_MASK) {
- DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
- i, vcstatus);
- print_irq_status_vc(i, vcstatus);
- } else if (debug_irq) {
- print_irq_status_vc(i, vcstatus);
+ /* IRQ is not for us */
+ if (!irqstatus) {
+ spin_unlock(&dsi.irq_lock);
+ return IRQ_NONE;
+ }
+
+ dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
+ /* flush posted write */
+ dsi_read_reg(DSI_IRQSTATUS);
+
+ for (i = 0; i < 4; ++i) {
+ if ((irqstatus & (1 << i)) == 0) {
+ vcstatus[i] = 0;
+ continue;
}
- dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus);
+ vcstatus[i] = dsi_read_reg(DSI_VC_IRQSTATUS(i));
+
+ dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus[i]);
/* flush posted write */
dsi_read_reg(DSI_VC_IRQSTATUS(i));
}
@@ -542,117 +635,307 @@ void dsi_irq_handler(void)
if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs);
-#endif
-
dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
/* flush posted write */
dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
+ } else {
+ ciostatus = 0;
+ }
- if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
- DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
- print_irq_status_cio(ciostatus);
- } else if (debug_irq) {
- print_irq_status_cio(ciostatus);
- }
+#ifdef DSI_CATCH_MISSING_TE
+ if (irqstatus & DSI_IRQ_TE_TRIGGER)
+ del_timer(&dsi.te_timer);
+#endif
+
+ /* make a copy and unlock, so that isrs can unregister
+ * themselves */
+ memcpy(&dsi.isr_tables_copy, &dsi.isr_tables, sizeof(dsi.isr_tables));
+
+ spin_unlock(&dsi.irq_lock);
+
+ dsi_handle_isrs(&dsi.isr_tables_copy, irqstatus, vcstatus, ciostatus);
+
+ dsi_handle_irq_errors(irqstatus, vcstatus, ciostatus);
+
+ dsi_collect_irq_stats(irqstatus, vcstatus, ciostatus);
+
+ return IRQ_HANDLED;
+}
+
+/* dsi.irq_lock has to be locked by the caller */
+static void _omap_dsi_configure_irqs(struct dsi_isr_data *isr_array,
+ unsigned isr_array_size, u32 default_mask,
+ const struct dsi_reg enable_reg,
+ const struct dsi_reg status_reg)
+{
+ struct dsi_isr_data *isr_data;
+ u32 mask;
+ u32 old_mask;
+ int i;
+
+ mask = default_mask;
+
+ for (i = 0; i < isr_array_size; i++) {
+ isr_data = &isr_array[i];
+
+ if (isr_data->isr == NULL)
+ continue;
+
+ mask |= isr_data->mask;
}
- dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
- /* flush posted write */
- dsi_read_reg(DSI_IRQSTATUS);
+ old_mask = dsi_read_reg(enable_reg);
+ /* clear the irqstatus for newly enabled irqs */
+ dsi_write_reg(status_reg, (mask ^ old_mask) & mask);
+ dsi_write_reg(enable_reg, mask);
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spin_unlock(&dsi.irq_stats_lock);
+ /* flush posted writes */
+ dsi_read_reg(enable_reg);
+ dsi_read_reg(status_reg);
+}
+
+/* dsi.irq_lock has to be locked by the caller */
+static void _omap_dsi_set_irqs(void)
+{
+ u32 mask = DSI_IRQ_ERROR_MASK;
+#ifdef DSI_CATCH_MISSING_TE
+ mask |= DSI_IRQ_TE_TRIGGER;
#endif
+ _omap_dsi_configure_irqs(dsi.isr_tables.isr_table,
+ ARRAY_SIZE(dsi.isr_tables.isr_table), mask,
+ DSI_IRQENABLE, DSI_IRQSTATUS);
}
+/* dsi.irq_lock has to be locked by the caller */
+static void _omap_dsi_set_irqs_vc(int vc)
+{
+ _omap_dsi_configure_irqs(dsi.isr_tables.isr_table_vc[vc],
+ ARRAY_SIZE(dsi.isr_tables.isr_table_vc[vc]),
+ DSI_VC_IRQ_ERROR_MASK,
+ DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
+}
+
+/* dsi.irq_lock has to be locked by the caller */
+static void _omap_dsi_set_irqs_cio(void)
+{
+ _omap_dsi_configure_irqs(dsi.isr_tables.isr_table_cio,
+ ARRAY_SIZE(dsi.isr_tables.isr_table_cio),
+ DSI_CIO_IRQ_ERROR_MASK,
+ DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
+}
static void _dsi_initialize_irq(void)
{
- u32 l;
+ unsigned long flags;
+ int vc;
+
+ spin_lock_irqsave(&dsi.irq_lock, flags);
+
+ memset(&dsi.isr_tables, 0, sizeof(dsi.isr_tables));
+
+ _omap_dsi_set_irqs();
+ for (vc = 0; vc < 4; ++vc)
+ _omap_dsi_set_irqs_vc(vc);
+ _omap_dsi_set_irqs_cio();
+
+ spin_unlock_irqrestore(&dsi.irq_lock, flags);
+}
+
+static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
+ struct dsi_isr_data *isr_array, unsigned isr_array_size)
+{
+ struct dsi_isr_data *isr_data;
+ int free_idx;
int i;
- /* disable all interrupts */
- dsi_write_reg(DSI_IRQENABLE, 0);
- for (i = 0; i < 4; ++i)
- dsi_write_reg(DSI_VC_IRQENABLE(i), 0);
- dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, 0);
+ BUG_ON(isr == NULL);
- /* clear interrupt status */
- l = dsi_read_reg(DSI_IRQSTATUS);
- dsi_write_reg(DSI_IRQSTATUS, l & ~DSI_IRQ_CHANNEL_MASK);
+ /* check for duplicate entry and find a free slot */
+ free_idx = -1;
+ for (i = 0; i < isr_array_size; i++) {
+ isr_data = &isr_array[i];
- for (i = 0; i < 4; ++i) {
- l = dsi_read_reg(DSI_VC_IRQSTATUS(i));
- dsi_write_reg(DSI_VC_IRQSTATUS(i), l);
+ if (isr_data->isr == isr && isr_data->arg == arg &&
+ isr_data->mask == mask) {
+ return -EINVAL;
+ }
+
+ if (isr_data->isr == NULL && free_idx == -1)
+ free_idx = i;
}
- l = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
- dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, l);
+ if (free_idx == -1)
+ return -EBUSY;
- /* enable error irqs */
- l = DSI_IRQ_ERROR_MASK;
-#ifdef DSI_CATCH_MISSING_TE
- l |= DSI_IRQ_TE_TRIGGER;
-#endif
- dsi_write_reg(DSI_IRQENABLE, l);
+ isr_data = &isr_array[free_idx];
+ isr_data->isr = isr;
+ isr_data->arg = arg;
+ isr_data->mask = mask;
- l = DSI_VC_IRQ_ERROR_MASK;
- for (i = 0; i < 4; ++i)
- dsi_write_reg(DSI_VC_IRQENABLE(i), l);
+ return 0;
+}
+
+static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
+ struct dsi_isr_data *isr_array, unsigned isr_array_size)
+{
+ struct dsi_isr_data *isr_data;
+ int i;
+
+ for (i = 0; i < isr_array_size; i++) {
+ isr_data = &isr_array[i];
+ if (isr_data->isr != isr || isr_data->arg != arg ||
+ isr_data->mask != mask)
+ continue;
- l = DSI_CIO_IRQ_ERROR_MASK;
- dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, l);
+ isr_data->isr = NULL;
+ isr_data->arg = NULL;
+ isr_data->mask = 0;
+
+ return 0;
+ }
+
+ return -EINVAL;
}
-static u32 dsi_get_errors(void)
+static int dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask)
{
unsigned long flags;
- u32 e;
- spin_lock_irqsave(&dsi.errors_lock, flags);
- e = dsi.errors;
- dsi.errors = 0;
- spin_unlock_irqrestore(&dsi.errors_lock, flags);
- return e;
+ int r;
+
+ spin_lock_irqsave(&dsi.irq_lock, flags);
+
+ r = _dsi_register_isr(isr, arg, mask, dsi.isr_tables.isr_table,
+ ARRAY_SIZE(dsi.isr_tables.isr_table));
+
+ if (r == 0)
+ _omap_dsi_set_irqs();
+
+ spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+ return r;
}
-static void dsi_vc_enable_bta_irq(int channel)
+static int dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask)
{
- u32 l;
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&dsi.irq_lock, flags);
- dsi_write_reg(DSI_VC_IRQSTATUS(channel), DSI_VC_IRQ_BTA);
+ r = _dsi_unregister_isr(isr, arg, mask, dsi.isr_tables.isr_table,
+ ARRAY_SIZE(dsi.isr_tables.isr_table));
- l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
- l |= DSI_VC_IRQ_BTA;
- dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
+ if (r == 0)
+ _omap_dsi_set_irqs();
+
+ spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+ return r;
}
-static void dsi_vc_disable_bta_irq(int channel)
+static int dsi_register_isr_vc(int channel, omap_dsi_isr_t isr, void *arg,
+ u32 mask)
{
- u32 l;
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&dsi.irq_lock, flags);
+
+ r = _dsi_register_isr(isr, arg, mask,
+ dsi.isr_tables.isr_table_vc[channel],
+ ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel]));
+
+ if (r == 0)
+ _omap_dsi_set_irqs_vc(channel);
+
+ spin_unlock_irqrestore(&dsi.irq_lock, flags);
- l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
- l &= ~DSI_VC_IRQ_BTA;
- dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
+ return r;
+}
+
+static int dsi_unregister_isr_vc(int channel, omap_dsi_isr_t isr, void *arg,
+ u32 mask)
+{
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&dsi.irq_lock, flags);
+
+ r = _dsi_unregister_isr(isr, arg, mask,
+ dsi.isr_tables.isr_table_vc[channel],
+ ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel]));
+
+ if (r == 0)
+ _omap_dsi_set_irqs_vc(channel);
+
+ spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+ return r;
+}
+
+static int dsi_register_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask)
+{
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&dsi.irq_lock, flags);
+
+ r = _dsi_register_isr(isr, arg, mask, dsi.isr_tables.isr_table_cio,
+ ARRAY_SIZE(dsi.isr_tables.isr_table_cio));
+
+ if (r == 0)
+ _omap_dsi_set_irqs_cio();
+
+ spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+ return r;
+}
+
+static int dsi_unregister_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask)
+{
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&dsi.irq_lock, flags);
+
+ r = _dsi_unregister_isr(isr, arg, mask, dsi.isr_tables.isr_table_cio,
+ ARRAY_SIZE(dsi.isr_tables.isr_table_cio));
+
+ if (r == 0)
+ _omap_dsi_set_irqs_cio();
+
+ spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+ return r;
+}
+
+static u32 dsi_get_errors(void)
+{
+ unsigned long flags;
+ u32 e;
+ spin_lock_irqsave(&dsi.errors_lock, flags);
+ e = dsi.errors;
+ dsi.errors = 0;
+ spin_unlock_irqrestore(&dsi.errors_lock, flags);
+ return e;
}
-/* DSI func clock. this could also be DSI2_PLL_FCLK */
+/* DSI func clock. this could also be dsi_pll_hsdiv_dsi_clk */
static inline void enable_clocks(bool enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
}
/* source clock for DSI PLL. this could also be PCLKFREE */
static inline void dsi_enable_pll_clock(bool enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_FCK2);
+ dss_clk_enable(DSS_CLK_SYSCK);
else
- dss_clk_disable(DSS_CLK_FCK2);
+ dss_clk_disable(DSS_CLK_SYSCK);
if (enable && dsi.pll_locked) {
if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1)
@@ -707,14 +990,14 @@ static inline int dsi_if_enable(bool enable)
return 0;
}
-unsigned long dsi_get_dsi1_pll_rate(void)
+unsigned long dsi_get_pll_hsdiv_dispc_rate(void)
{
- return dsi.current_cinfo.dsi1_pll_fclk;
+ return dsi.current_cinfo.dsi_pll_hsdiv_dispc_clk;
}
-static unsigned long dsi_get_dsi2_pll_rate(void)
+static unsigned long dsi_get_pll_hsdiv_dsi_rate(void)
{
- return dsi.current_cinfo.dsi2_pll_fclk;
+ return dsi.current_cinfo.dsi_pll_hsdiv_dsi_clk;
}
static unsigned long dsi_get_txbyteclkhs(void)
@@ -726,12 +1009,12 @@ static unsigned long dsi_fclk_rate(void)
{
unsigned long r;
- if (dss_get_dsi_clk_source() == DSS_SRC_DSS1_ALWON_FCLK) {
- /* DSI FCLK source is DSS1_ALWON_FCK, which is dss1_fck */
- r = dss_clk_get_rate(DSS_CLK_FCK1);
+ if (dss_get_dsi_clk_source() == DSS_CLK_SRC_FCK) {
+ /* DSI FCLK source is DSS_CLK_FCK */
+ r = dss_clk_get_rate(DSS_CLK_FCK);
} else {
- /* DSI FCLK source is DSI2_PLL_FCLK */
- r = dsi_get_dsi2_pll_rate();
+ /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
+ r = dsi_get_pll_hsdiv_dsi_rate();
}
return r;
@@ -745,7 +1028,7 @@ static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
lp_clk_div = dssdev->phy.dsi.div.lp_clk_div;
- if (lp_clk_div == 0 || lp_clk_div > LP_DIV_MAX)
+ if (lp_clk_div == 0 || lp_clk_div > dsi.lpdiv_max)
return -EINVAL;
dsi_fclk = dsi_fclk_rate();
@@ -795,22 +1078,22 @@ static int dsi_pll_power(enum dsi_pll_power_state state)
static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
struct dsi_clock_info *cinfo)
{
- if (cinfo->regn == 0 || cinfo->regn > REGN_MAX)
+ if (cinfo->regn == 0 || cinfo->regn > dsi.regn_max)
return -EINVAL;
- if (cinfo->regm == 0 || cinfo->regm > REGM_MAX)
+ if (cinfo->regm == 0 || cinfo->regm > dsi.regm_max)
return -EINVAL;
- if (cinfo->regm3 > REGM3_MAX)
+ if (cinfo->regm_dispc > dsi.regm_dispc_max)
return -EINVAL;
- if (cinfo->regm4 > REGM4_MAX)
+ if (cinfo->regm_dsi > dsi.regm_dsi_max)
return -EINVAL;
- if (cinfo->use_dss2_fck) {
- cinfo->clkin = dss_clk_get_rate(DSS_CLK_FCK2);
+ if (cinfo->use_sys_clk) {
+ cinfo->clkin = dss_clk_get_rate(DSS_CLK_SYSCK);
/* XXX it is unclear if highfreq should be used
- * with DSS2_FCK source also */
+ * with DSS_SYS_CLK source also */
cinfo->highfreq = 0;
} else {
cinfo->clkin = dispc_pclk_rate(dssdev->manager->id);
@@ -823,7 +1106,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
- if (cinfo->fint > FINT_MAX || cinfo->fint < FINT_MIN)
+ if (cinfo->fint > dsi.fint_max || cinfo->fint < dsi.fint_min)
return -EINVAL;
cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
@@ -831,15 +1114,17 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
return -EINVAL;
- if (cinfo->regm3 > 0)
- cinfo->dsi1_pll_fclk = cinfo->clkin4ddr / cinfo->regm3;
+ if (cinfo->regm_dispc > 0)
+ cinfo->dsi_pll_hsdiv_dispc_clk =
+ cinfo->clkin4ddr / cinfo->regm_dispc;
else
- cinfo->dsi1_pll_fclk = 0;
+ cinfo->dsi_pll_hsdiv_dispc_clk = 0;
- if (cinfo->regm4 > 0)
- cinfo->dsi2_pll_fclk = cinfo->clkin4ddr / cinfo->regm4;
+ if (cinfo->regm_dsi > 0)
+ cinfo->dsi_pll_hsdiv_dsi_clk =
+ cinfo->clkin4ddr / cinfo->regm_dsi;
else
- cinfo->dsi2_pll_fclk = 0;
+ cinfo->dsi_pll_hsdiv_dsi_clk = 0;
return 0;
}
@@ -852,23 +1137,25 @@ int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
struct dispc_clock_info best_dispc;
int min_fck_per_pck;
int match = 0;
- unsigned long dss_clk_fck2;
+ unsigned long dss_sys_clk, max_dss_fck;
+
+ dss_sys_clk = dss_clk_get_rate(DSS_CLK_SYSCK);
- dss_clk_fck2 = dss_clk_get_rate(DSS_CLK_FCK2);
+ max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
if (req_pck == dsi.cache_req_pck &&
- dsi.cache_cinfo.clkin == dss_clk_fck2) {
+ dsi.cache_cinfo.clkin == dss_sys_clk) {
DSSDBG("DSI clock info found from cache\n");
*dsi_cinfo = dsi.cache_cinfo;
- dispc_find_clk_divs(is_tft, req_pck, dsi_cinfo->dsi1_pll_fclk,
- dispc_cinfo);
+ dispc_find_clk_divs(is_tft, req_pck,
+ dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo);
return 0;
}
min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
if (min_fck_per_pck &&
- req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
+ req_pck * min_fck_per_pck > max_dss_fck) {
DSSERR("Requested pixel clock not possible with the current "
"OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
"the constraint off.\n");
@@ -882,24 +1169,24 @@ retry:
memset(&best_dispc, 0, sizeof(best_dispc));
memset(&cur, 0, sizeof(cur));
- cur.clkin = dss_clk_fck2;
- cur.use_dss2_fck = 1;
+ cur.clkin = dss_sys_clk;
+ cur.use_sys_clk = 1;
cur.highfreq = 0;
/* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
/* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
/* To reduce PLL lock time, keep Fint high (around 2 MHz) */
- for (cur.regn = 1; cur.regn < REGN_MAX; ++cur.regn) {
+ for (cur.regn = 1; cur.regn < dsi.regn_max; ++cur.regn) {
if (cur.highfreq == 0)
cur.fint = cur.clkin / cur.regn;
else
cur.fint = cur.clkin / (2 * cur.regn);
- if (cur.fint > FINT_MAX || cur.fint < FINT_MIN)
+ if (cur.fint > dsi.fint_max || cur.fint < dsi.fint_min)
continue;
/* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
- for (cur.regm = 1; cur.regm < REGM_MAX; ++cur.regm) {
+ for (cur.regm = 1; cur.regm < dsi.regm_max; ++cur.regm) {
unsigned long a, b;
a = 2 * cur.regm * (cur.clkin/1000);
@@ -909,30 +1196,32 @@ retry:
if (cur.clkin4ddr > 1800 * 1000 * 1000)
break;
- /* DSI1_PLL_FCLK(MHz) = DSIPHY(MHz) / regm3 < 173MHz */
- for (cur.regm3 = 1; cur.regm3 < REGM3_MAX;
- ++cur.regm3) {
+ /* dsi_pll_hsdiv_dispc_clk(MHz) =
+ * DSIPHY(MHz) / regm_dispc < 173MHz/186MHz */
+ for (cur.regm_dispc = 1; cur.regm_dispc < dsi.regm_dispc_max;
+ ++cur.regm_dispc) {
struct dispc_clock_info cur_dispc;
- cur.dsi1_pll_fclk = cur.clkin4ddr / cur.regm3;
+ cur.dsi_pll_hsdiv_dispc_clk =
+ cur.clkin4ddr / cur.regm_dispc;
/* this will narrow down the search a bit,
* but still give pixclocks below what was
* requested */
- if (cur.dsi1_pll_fclk < req_pck)
+ if (cur.dsi_pll_hsdiv_dispc_clk < req_pck)
break;
- if (cur.dsi1_pll_fclk > DISPC_MAX_FCK)
+ if (cur.dsi_pll_hsdiv_dispc_clk > max_dss_fck)
continue;
if (min_fck_per_pck &&
- cur.dsi1_pll_fclk <
+ cur.dsi_pll_hsdiv_dispc_clk <
req_pck * min_fck_per_pck)
continue;
match = 1;
dispc_find_clk_divs(is_tft, req_pck,
- cur.dsi1_pll_fclk,
+ cur.dsi_pll_hsdiv_dispc_clk,
&cur_dispc);
if (abs(cur_dispc.pck - req_pck) <
@@ -961,9 +1250,9 @@ found:
return -EINVAL;
}
- /* DSI2_PLL_FCLK (regm4) is not used */
- best.regm4 = 0;
- best.dsi2_pll_fclk = 0;
+ /* dsi_pll_hsdiv_dsi_clk (regm_dsi) is not used */
+ best.regm_dsi = 0;
+ best.dsi_pll_hsdiv_dsi_clk = 0;
if (dsi_cinfo)
*dsi_cinfo = best;
@@ -981,24 +1270,28 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
{
int r = 0;
u32 l;
- int f;
+ int f = 0;
+ u8 regn_start, regn_end, regm_start, regm_end;
+ u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;
DSSDBGF();
dsi.current_cinfo.fint = cinfo->fint;
dsi.current_cinfo.clkin4ddr = cinfo->clkin4ddr;
- dsi.current_cinfo.dsi1_pll_fclk = cinfo->dsi1_pll_fclk;
- dsi.current_cinfo.dsi2_pll_fclk = cinfo->dsi2_pll_fclk;
+ dsi.current_cinfo.dsi_pll_hsdiv_dispc_clk =
+ cinfo->dsi_pll_hsdiv_dispc_clk;
+ dsi.current_cinfo.dsi_pll_hsdiv_dsi_clk =
+ cinfo->dsi_pll_hsdiv_dsi_clk;
dsi.current_cinfo.regn = cinfo->regn;
dsi.current_cinfo.regm = cinfo->regm;
- dsi.current_cinfo.regm3 = cinfo->regm3;
- dsi.current_cinfo.regm4 = cinfo->regm4;
+ dsi.current_cinfo.regm_dispc = cinfo->regm_dispc;
+ dsi.current_cinfo.regm_dsi = cinfo->regm_dsi;
DSSDBG("DSI Fint %ld\n", cinfo->fint);
DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
- cinfo->use_dss2_fck ? "dss2_fck" : "pclkfree",
+ cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
cinfo->clkin,
cinfo->highfreq);
@@ -1015,38 +1308,53 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
- DSSDBG("regm3 = %d, dsi1_pll_fclk = %lu\n",
- cinfo->regm3, cinfo->dsi1_pll_fclk);
- DSSDBG("regm4 = %d, dsi2_pll_fclk = %lu\n",
- cinfo->regm4, cinfo->dsi2_pll_fclk);
+ DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
+ dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
+ dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
+ cinfo->dsi_pll_hsdiv_dispc_clk);
+ DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
+ dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
+ dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
+ cinfo->dsi_pll_hsdiv_dsi_clk);
+
+ dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
+ dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM, &regm_start, &regm_end);
+ dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DISPC, &regm_dispc_start,
+ &regm_dispc_end);
+ dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
+ &regm_dsi_end);
REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */
l = dsi_read_reg(DSI_PLL_CONFIGURATION1);
l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
- l = FLD_MOD(l, cinfo->regn - 1, 7, 1); /* DSI_PLL_REGN */
- l = FLD_MOD(l, cinfo->regm, 18, 8); /* DSI_PLL_REGM */
- l = FLD_MOD(l, cinfo->regm3 > 0 ? cinfo->regm3 - 1 : 0,
- 22, 19); /* DSI_CLOCK_DIV */
- l = FLD_MOD(l, cinfo->regm4 > 0 ? cinfo->regm4 - 1 : 0,
- 26, 23); /* DSIPROTO_CLOCK_DIV */
+ /* DSI_PLL_REGN */
+ l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
+ /* DSI_PLL_REGM */
+ l = FLD_MOD(l, cinfo->regm, regm_start, regm_end);
+ /* DSI_CLOCK_DIV */
+ l = FLD_MOD(l, cinfo->regm_dispc > 0 ? cinfo->regm_dispc - 1 : 0,
+ regm_dispc_start, regm_dispc_end);
+ /* DSIPROTO_CLOCK_DIV */
+ l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
+ regm_dsi_start, regm_dsi_end);
dsi_write_reg(DSI_PLL_CONFIGURATION1, l);
- BUG_ON(cinfo->fint < 750000 || cinfo->fint > 2100000);
- if (cinfo->fint < 1000000)
- f = 0x3;
- else if (cinfo->fint < 1250000)
- f = 0x4;
- else if (cinfo->fint < 1500000)
- f = 0x5;
- else if (cinfo->fint < 1750000)
- f = 0x6;
- else
- f = 0x7;
+ BUG_ON(cinfo->fint < dsi.fint_min || cinfo->fint > dsi.fint_max);
+
+ if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
+ f = cinfo->fint < 1000000 ? 0x3 :
+ cinfo->fint < 1250000 ? 0x4 :
+ cinfo->fint < 1500000 ? 0x5 :
+ cinfo->fint < 1750000 ? 0x6 :
+ 0x7;
+ }
l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
- l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
- l = FLD_MOD(l, cinfo->use_dss2_fck ? 0 : 1,
+
+ if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
+ l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
+ l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
11, 11); /* DSI_PLL_CLKSEL */
l = FLD_MOD(l, cinfo->highfreq,
12, 12); /* DSI_PLL_HIGHFREQ */
@@ -1101,6 +1409,26 @@ int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
DSSDBG("PLL init\n");
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+ /*
+ * HACK: this is just a quick hack to get the USE_DSI_PLL
+ * option working. USE_DSI_PLL is itself a big hack, and
+ * should be removed.
+ */
+ if (dsi.vdds_dsi_reg == NULL) {
+ struct regulator *vdds_dsi;
+
+ vdds_dsi = regulator_get(&dsi.pdev->dev, "vdds_dsi");
+
+ if (IS_ERR(vdds_dsi)) {
+ DSSERR("can't get VDDS_DSI regulator\n");
+ return PTR_ERR(vdds_dsi);
+ }
+
+ dsi.vdds_dsi_reg = vdds_dsi;
+ }
+#endif
+
enable_clocks(1);
dsi_enable_pll_clock(1);
@@ -1162,6 +1490,10 @@ void dsi_dump_clocks(struct seq_file *s)
{
int clksel;
struct dsi_clock_info *cinfo = &dsi.current_cinfo;
+ enum dss_clk_source dispc_clk_src, dsi_clk_src;
+
+ dispc_clk_src = dss_get_dispc_clk_source();
+ dsi_clk_src = dss_get_dsi_clk_source();
enable_clocks(1);
@@ -1171,30 +1503,34 @@ void dsi_dump_clocks(struct seq_file *s)
seq_printf(s, "dsi pll source = %s\n",
clksel == 0 ?
- "dss2_alwon_fclk" : "pclkfree");
+ "dss_sys_clk" : "pclkfree");
seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
cinfo->clkin4ddr, cinfo->regm);
- seq_printf(s, "dsi1_pll_fck\t%-16luregm3 %u\t(%s)\n",
- cinfo->dsi1_pll_fclk,
- cinfo->regm3,
- dss_get_dispc_clk_source() == DSS_SRC_DSS1_ALWON_FCLK ?
+ seq_printf(s, "%s (%s)\t%-16luregm_dispc %u\t(%s)\n",
+ dss_get_generic_clk_source_name(dispc_clk_src),
+ dss_feat_get_clk_source_name(dispc_clk_src),
+ cinfo->dsi_pll_hsdiv_dispc_clk,
+ cinfo->regm_dispc,
+ dispc_clk_src == DSS_CLK_SRC_FCK ?
"off" : "on");
- seq_printf(s, "dsi2_pll_fck\t%-16luregm4 %u\t(%s)\n",
- cinfo->dsi2_pll_fclk,
- cinfo->regm4,
- dss_get_dsi_clk_source() == DSS_SRC_DSS1_ALWON_FCLK ?
+ seq_printf(s, "%s (%s)\t%-16luregm_dsi %u\t(%s)\n",
+ dss_get_generic_clk_source_name(dsi_clk_src),
+ dss_feat_get_clk_source_name(dsi_clk_src),
+ cinfo->dsi_pll_hsdiv_dsi_clk,
+ cinfo->regm_dsi,
+ dsi_clk_src == DSS_CLK_SRC_FCK ?
"off" : "on");
seq_printf(s, "- DSI -\n");
- seq_printf(s, "dsi fclk source = %s\n",
- dss_get_dsi_clk_source() == DSS_SRC_DSS1_ALWON_FCLK ?
- "dss1_alwon_fclk" : "dsi2_pll_fclk");
+ seq_printf(s, "dsi fclk source = %s (%s)\n",
+ dss_get_generic_clk_source_name(dsi_clk_src),
+ dss_feat_get_clk_source_name(dsi_clk_src));
seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate());
@@ -1306,7 +1642,7 @@ void dsi_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
DUMPREG(DSI_REVISION);
DUMPREG(DSI_SYSCONFIG);
@@ -1378,7 +1714,7 @@ void dsi_dump_regs(struct seq_file *s)
DUMPREG(DSI_PLL_CONFIGURATION1);
DUMPREG(DSI_PLL_CONFIGURATION2);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
#undef DUMPREG
}
@@ -1537,9 +1873,6 @@ static int dsi_complexio_init(struct omap_dss_device *dssdev)
DSSDBG("dsi_complexio_init\n");
- /* CIO_CLK_ICG, enable L3 clk to CIO */
- REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14);
-
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
@@ -1564,10 +1897,12 @@ static int dsi_complexio_init(struct omap_dss_device *dssdev)
goto err;
}
- if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 21, 1) != 1) {
- DSSERR("ComplexIO LDO power down.\n");
- r = -ENODEV;
- goto err;
+ if (dss_has_feature(FEAT_DSI_LDO_STATUS)) {
+ if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 21, 1) != 1) {
+ DSSERR("ComplexIO LDO power down.\n");
+ r = -ENODEV;
+ goto err;
+ }
}
dsi_complexio_timings();
@@ -1622,20 +1957,6 @@ static int _dsi_reset(void)
return _dsi_wait_reset();
}
-static void dsi_reset_tx_fifo(int channel)
-{
- u32 mask;
- u32 l;
-
- /* set fifosize of the channel to 0, then return the old size */
- l = dsi_read_reg(DSI_TX_FIFO_VC_SIZE);
-
- mask = FLD_MASK((8 * channel) + 7, (8 * channel) + 4);
- dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l & ~mask);
-
- dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l);
-}
-
static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2,
enum fifo_size size3, enum fifo_size size4)
{
@@ -1748,13 +2069,13 @@ static void dsi_vc_initial_config(int channel)
r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
+ if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH))
+ r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */
r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
dsi_write_reg(DSI_VC_CTRL(channel), r);
-
- dsi.vc[channel].mode = DSI_VC_MODE_L4;
}
static int dsi_vc_config_l4(int channel)
@@ -1774,6 +2095,10 @@ static int dsi_vc_config_l4(int channel)
REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
+ /* DCS_CMD_ENABLE */
+ if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC))
+ REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 30, 30);
+
dsi_vc_enable(channel, 1);
dsi.vc[channel].mode = DSI_VC_MODE_L4;
@@ -1798,6 +2123,10 @@ static int dsi_vc_config_vp(int channel)
REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 1, 1); /* SOURCE, 1 = video port */
+ /* DCS_CMD_ENABLE */
+ if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC))
+ REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 30, 30);
+
dsi_vc_enable(channel, 1);
dsi.vc[channel].mode = DSI_VC_MODE_VP;
@@ -1922,33 +2251,44 @@ static int dsi_vc_send_bta(int channel)
int dsi_vc_send_bta_sync(int channel)
{
+ DECLARE_COMPLETION_ONSTACK(completion);
int r = 0;
u32 err;
- INIT_COMPLETION(dsi.bta_completion);
+ r = dsi_register_isr_vc(channel, dsi_completion_handler,
+ &completion, DSI_VC_IRQ_BTA);
+ if (r)
+ goto err0;
- dsi_vc_enable_bta_irq(channel);
+ r = dsi_register_isr(dsi_completion_handler, &completion,
+ DSI_IRQ_ERROR_MASK);
+ if (r)
+ goto err1;
r = dsi_vc_send_bta(channel);
if (r)
- goto err;
+ goto err2;
- if (wait_for_completion_timeout(&dsi.bta_completion,
+ if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(500)) == 0) {
DSSERR("Failed to receive BTA\n");
r = -EIO;
- goto err;
+ goto err2;
}
err = dsi_get_errors();
if (err) {
DSSERR("Error while sending BTA: %x\n", err);
r = -EIO;
- goto err;
+ goto err2;
}
-err:
- dsi_vc_disable_bta_irq(channel);
-
+err2:
+ dsi_unregister_isr(dsi_completion_handler, &completion,
+ DSI_IRQ_ERROR_MASK);
+err1:
+ dsi_unregister_isr_vc(channel, dsi_completion_handler,
+ &completion, DSI_VC_IRQ_BTA);
+err0:
return r;
}
EXPORT_SYMBOL(dsi_vc_send_bta_sync);
@@ -1961,7 +2301,7 @@ static inline void dsi_vc_write_long_header(int channel, u8 data_type,
WARN_ON(!dsi_bus_is_locked());
- data_id = data_type | channel << 6;
+ data_id = data_type | dsi.vc[channel].vc_id << 6;
val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
FLD_VAL(ecc, 31, 24);
@@ -2064,7 +2404,7 @@ static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
return -EINVAL;
}
- data_id = data_type | channel << 6;
+ data_id = data_type | dsi.vc[channel].vc_id << 6;
r = (data_id << 0) | (data << 8) | (ecc << 24);
@@ -2442,8 +2782,11 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */
r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
- r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
- r = FLD_MOD(r, 0, 25, 25); /* DCS_CMD_CODE, 1=start, 0=continue */
+ if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
+ r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
+ /* DCS_CMD_CODE, 1=start, 0=continue */
+ r = FLD_MOD(r, 0, 25, 25);
+ }
dsi_write_reg(DSI_CTRL, r);
@@ -2762,19 +3105,20 @@ static void dsi_te_timeout(unsigned long arg)
}
#endif
+static void dsi_framedone_bta_callback(void *data, u32 mask);
+
static void dsi_handle_framedone(int error)
{
const int channel = dsi.update_channel;
- cancel_delayed_work(&dsi.framedone_timeout_work);
+ dsi_unregister_isr_vc(channel, dsi_framedone_bta_callback,
+ NULL, DSI_VC_IRQ_BTA);
- dsi_vc_disable_bta_irq(channel);
+ cancel_delayed_work(&dsi.framedone_timeout_work);
/* SIDLEMODE back to smart-idle */
dispc_enable_sidle();
- dsi.bta_callback = NULL;
-
if (dsi.te_enabled) {
/* enable LP_RX_TO again after the TE */
REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
@@ -2808,7 +3152,7 @@ static void dsi_framedone_timeout_work_callback(struct work_struct *work)
dsi_handle_framedone(-ETIMEDOUT);
}
-static void dsi_framedone_bta_callback(void)
+static void dsi_framedone_bta_callback(void *data, u32 mask)
{
dsi_handle_framedone(0);
@@ -2848,15 +3192,19 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
* asynchronously.
* */
- dsi.bta_callback = dsi_framedone_bta_callback;
-
- barrier();
-
- dsi_vc_enable_bta_irq(channel);
+ r = dsi_register_isr_vc(channel, dsi_framedone_bta_callback,
+ NULL, DSI_VC_IRQ_BTA);
+ if (r) {
+ DSSERR("Failed to register BTA ISR\n");
+ dsi_handle_framedone(-EIO);
+ return;
+ }
r = dsi_vc_send_bta(channel);
if (r) {
DSSERR("BTA after framedone failed\n");
+ dsi_unregister_isr_vc(channel, dsi_framedone_bta_callback,
+ NULL, DSI_VC_IRQ_BTA);
dsi_handle_framedone(-EIO);
}
}
@@ -2984,12 +3332,12 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
struct dsi_clock_info cinfo;
int r;
- /* we always use DSS2_FCK as input clock */
- cinfo.use_dss2_fck = true;
+ /* we always use DSS_CLK_SYSCK as input clock */
+ cinfo.use_sys_clk = true;
cinfo.regn = dssdev->phy.dsi.div.regn;
cinfo.regm = dssdev->phy.dsi.div.regm;
- cinfo.regm3 = dssdev->phy.dsi.div.regm3;
- cinfo.regm4 = dssdev->phy.dsi.div.regm4;
+ cinfo.regm_dispc = dssdev->phy.dsi.div.regm_dispc;
+ cinfo.regm_dsi = dssdev->phy.dsi.div.regm_dsi;
r = dsi_calc_clock_rates(dssdev, &cinfo);
if (r) {
DSSERR("Failed to calc dsi clocks\n");
@@ -3011,7 +3359,7 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
int r;
unsigned long long fck;
- fck = dsi_get_dsi1_pll_rate();
+ fck = dsi_get_pll_hsdiv_dispc_rate();
dispc_cinfo.lck_div = dssdev->phy.dsi.div.lck_div;
dispc_cinfo.pck_div = dssdev->phy.dsi.div.pck_div;
@@ -3035,6 +3383,10 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
{
int r;
+ /* The SCPClk is required for both PLL and CIO registers on OMAP4 */
+ /* CIO_CLK_ICG, enable L3 clk to CIO */
+ REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14);
+
_dsi_print_reset_status();
r = dsi_pll_init(dssdev, true, true);
@@ -3045,8 +3397,10 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
if (r)
goto err1;
- dss_select_dispc_clk_source(DSS_SRC_DSI1_PLL_FCLK);
- dss_select_dsi_clk_source(DSS_SRC_DSI2_PLL_FCLK);
+ dss_select_dispc_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC);
+ dss_select_dsi_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI);
+ dss_select_lcd_clk_source(dssdev->manager->id,
+ DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC);
DSSDBG("PLL OK\n");
@@ -3082,8 +3436,8 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
err3:
dsi_complexio_uninit();
err2:
- dss_select_dispc_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
- dss_select_dsi_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
+ dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
+ dss_select_dsi_clk_source(DSS_CLK_SRC_FCK);
err1:
dsi_pll_uninit();
err0:
@@ -3099,8 +3453,8 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev)
dsi_vc_enable(2, 0);
dsi_vc_enable(3, 0);
- dss_select_dispc_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
- dss_select_dsi_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
+ dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
+ dss_select_dsi_clk_source(DSS_CLK_SRC_FCK);
dsi_complexio_uninit();
dsi_pll_uninit();
}
@@ -3220,29 +3574,107 @@ int dsi_init_display(struct omap_dss_device *dssdev)
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
- dsi.vc[0].dssdev = dssdev;
- dsi.vc[1].dssdev = dssdev;
+ if (dsi.vdds_dsi_reg == NULL) {
+ struct regulator *vdds_dsi;
+
+ vdds_dsi = regulator_get(&dsi.pdev->dev, "vdds_dsi");
+
+ if (IS_ERR(vdds_dsi)) {
+ DSSERR("can't get VDDS_DSI regulator\n");
+ return PTR_ERR(vdds_dsi);
+ }
+
+ dsi.vdds_dsi_reg = vdds_dsi;
+ }
+
+ return 0;
+}
+
+int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dsi.vc); i++) {
+ if (!dsi.vc[i].dssdev) {
+ dsi.vc[i].dssdev = dssdev;
+ *channel = i;
+ return 0;
+ }
+ }
+
+ DSSERR("cannot get VC for display %s\n", dssdev->name);
+ return -ENOSPC;
+}
+EXPORT_SYMBOL(omap_dsi_request_vc);
+
+int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
+{
+ if (vc_id < 0 || vc_id > 3) {
+ DSSERR("VC ID out of range\n");
+ return -EINVAL;
+ }
+
+ if (channel < 0 || channel > 3) {
+ DSSERR("Virtual Channel out of range\n");
+ return -EINVAL;
+ }
+
+ if (dsi.vc[channel].dssdev != dssdev) {
+ DSSERR("Virtual Channel not allocated to display %s\n",
+ dssdev->name);
+ return -EINVAL;
+ }
+
+ dsi.vc[channel].vc_id = vc_id;
return 0;
}
+EXPORT_SYMBOL(omap_dsi_set_vc_id);
-void dsi_wait_dsi1_pll_active(void)
+void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel)
+{
+ if ((channel >= 0 && channel <= 3) &&
+ dsi.vc[channel].dssdev == dssdev) {
+ dsi.vc[channel].dssdev = NULL;
+ dsi.vc[channel].vc_id = 0;
+ }
+}
+EXPORT_SYMBOL(omap_dsi_release_vc);
+
+void dsi_wait_pll_hsdiv_dispc_active(void)
{
if (wait_for_bit_change(DSI_PLL_STATUS, 7, 1) != 1)
- DSSERR("DSI1 PLL clock not active\n");
+ DSSERR("%s (%s) not active\n",
+ dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
+ dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC));
}
-void dsi_wait_dsi2_pll_active(void)
+void dsi_wait_pll_hsdiv_dsi_active(void)
{
if (wait_for_bit_change(DSI_PLL_STATUS, 8, 1) != 1)
- DSSERR("DSI2 PLL clock not active\n");
+ DSSERR("%s (%s) not active\n",
+ dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
+ dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI));
+}
+
+static void dsi_calc_clock_param_ranges(void)
+{
+ dsi.regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
+ dsi.regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM);
+ dsi.regm_dispc_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC);
+ dsi.regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI);
+ dsi.fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT);
+ dsi.fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT);
+ dsi.lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
}
-int dsi_init(struct platform_device *pdev)
+static int dsi_init(struct platform_device *pdev)
{
u32 rev;
- int r;
+ int r, i;
+ struct resource *dsi_mem;
+ spin_lock_init(&dsi.irq_lock);
spin_lock_init(&dsi.errors_lock);
dsi.errors = 0;
@@ -3251,8 +3683,6 @@ int dsi_init(struct platform_device *pdev)
dsi.irq_stats.last_reset = jiffies;
#endif
- init_completion(&dsi.bta_completion);
-
mutex_init(&dsi.lock);
sema_init(&dsi.bus_lock, 1);
@@ -3268,24 +3698,45 @@ int dsi_init(struct platform_device *pdev)
dsi.te_timer.function = dsi_te_timeout;
dsi.te_timer.data = 0;
#endif
- dsi.base = ioremap(DSI_BASE, DSI_SZ_REGS);
+ dsi_mem = platform_get_resource(dsi.pdev, IORESOURCE_MEM, 0);
+ if (!dsi_mem) {
+ DSSERR("can't get IORESOURCE_MEM DSI\n");
+ r = -EINVAL;
+ goto err1;
+ }
+ dsi.base = ioremap(dsi_mem->start, resource_size(dsi_mem));
if (!dsi.base) {
DSSERR("can't ioremap DSI\n");
r = -ENOMEM;
goto err1;
}
+ dsi.irq = platform_get_irq(dsi.pdev, 0);
+ if (dsi.irq < 0) {
+ DSSERR("platform_get_irq failed\n");
+ r = -ENODEV;
+ goto err2;
+ }
- dsi.vdds_dsi_reg = dss_get_vdds_dsi();
- if (IS_ERR(dsi.vdds_dsi_reg)) {
- DSSERR("can't get VDDS_DSI regulator\n");
- r = PTR_ERR(dsi.vdds_dsi_reg);
+ r = request_irq(dsi.irq, omap_dsi_irq_handler, IRQF_SHARED,
+ "OMAP DSI1", dsi.pdev);
+ if (r < 0) {
+ DSSERR("request_irq failed\n");
goto err2;
}
+ /* DSI VCs initialization */
+ for (i = 0; i < ARRAY_SIZE(dsi.vc); i++) {
+ dsi.vc[i].mode = DSI_VC_MODE_L4;
+ dsi.vc[i].dssdev = NULL;
+ dsi.vc[i].vc_id = 0;
+ }
+
+ dsi_calc_clock_param_ranges();
+
enable_clocks(1);
rev = dsi_read_reg(DSI_REVISION);
- printk(KERN_INFO "OMAP DSI rev %d.%d\n",
+ dev_dbg(&pdev->dev, "OMAP DSI rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
enable_clocks(0);
@@ -3298,8 +3749,14 @@ err1:
return r;
}
-void dsi_exit(void)
+static void dsi_exit(void)
{
+ if (dsi.vdds_dsi_reg != NULL) {
+ regulator_put(dsi.vdds_dsi_reg);
+ dsi.vdds_dsi_reg = NULL;
+ }
+
+ free_irq(dsi.irq, dsi.pdev);
iounmap(dsi.base);
destroy_workqueue(dsi.workqueue);
@@ -3307,3 +3764,41 @@ void dsi_exit(void)
DSSDBG("omap_dsi_exit\n");
}
+/* DSI1 HW IP initialisation */
+static int omap_dsi1hw_probe(struct platform_device *pdev)
+{
+ int r;
+ dsi.pdev = pdev;
+ r = dsi_init(pdev);
+ if (r) {
+ DSSERR("Failed to initialize DSI\n");
+ goto err_dsi;
+ }
+err_dsi:
+ return r;
+}
+
+static int omap_dsi1hw_remove(struct platform_device *pdev)
+{
+ dsi_exit();
+ return 0;
+}
+
+static struct platform_driver omap_dsi1hw_driver = {
+ .probe = omap_dsi1hw_probe,
+ .remove = omap_dsi1hw_remove,
+ .driver = {
+ .name = "omapdss_dsi1",
+ .owner = THIS_MODULE,
+ },
+};
+
+int dsi_init_platform_driver(void)
+{
+ return platform_driver_register(&omap_dsi1hw_driver);
+}
+
+void dsi_uninit_platform_driver(void)
+{
+ platform_driver_unregister(&omap_dsi1hw_driver);
+}
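
The three exported helpers above form the new virtual-channel allocation API: a panel driver requests a free VC, optionally programs the VC ID that ends up in the DSI packet header (see dsi_vc_write_long_header() and dsi_vc_send_short() above, which now use dsi.vc[channel].vc_id), and releases the VC on teardown. Below is a minimal sketch of the intended call pattern, assuming the new entry points are exposed through <plat/display.h>; the panel-driver function names and the probe/remove pairing are illustrative only, not part of this patch.

#include <plat/display.h>

/* Hypothetical panel probe: allocate a VC and pick VC ID 0 */
static int example_panel_probe(struct omap_dss_device *dssdev)
{
    int channel;
    int r;

    r = omap_dsi_request_vc(dssdev, &channel);  /* get a free VC */
    if (r) {
        dev_err(&dssdev->dev, "failed to get virtual channel\n");
        return r;
    }

    r = omap_dsi_set_vc_id(dssdev, channel, 0); /* VC ID placed in packet header */
    if (r) {
        omap_dsi_release_vc(dssdev, channel);
        return r;
    }

    /* the driver would normally store 'channel' in its private data */
    return 0;
}

/* Hypothetical teardown: hand the VC back so another display can use it */
static void example_panel_remove(struct omap_dss_device *dssdev, int channel)
{
    omap_dsi_release_vc(dssdev, channel);
}
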
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 77c3621c917..3f1fee63c67 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -26,14 +26,13 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>
-#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
#include <plat/display.h>
+#include <plat/clock.h>
#include "dss.h"
-
-#define DSS_BASE 0x48050000
+#include "dss_features.h"
#define DSS_SZ_REGS SZ_512
@@ -59,9 +58,17 @@ struct dss_reg {
dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
static struct {
+ struct platform_device *pdev;
void __iomem *base;
+ int ctx_id;
struct clk *dpll4_m4_ck;
+ struct clk *dss_ick;
+ struct clk *dss_fck;
+ struct clk *dss_sys_clk;
+ struct clk *dss_tv_fck;
+ struct clk *dss_video_fck;
+ unsigned num_clks_enabled;
unsigned long cache_req_pck;
unsigned long cache_prate;
@@ -70,10 +77,22 @@ static struct {
enum dss_clk_source dsi_clk_source;
enum dss_clk_source dispc_clk_source;
+ enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
u32 ctx[DSS_SZ_REGS / sizeof(u32)];
} dss;
+static const char * const dss_generic_clk_source_names[] = {
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC",
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI",
+ [DSS_CLK_SRC_FCK] = "DSS_FCK",
+};
+
+static void dss_clk_enable_all_no_ctx(void);
+static void dss_clk_disable_all_no_ctx(void);
+static void dss_clk_enable_no_ctx(enum dss_clock clks);
+static void dss_clk_disable_no_ctx(enum dss_clock clks);
+
static int _omap_dss_wait_reset(void);
static inline void dss_write_reg(const struct dss_reg idx, u32 val)
@@ -99,10 +118,11 @@ void dss_save_context(void)
SR(SYSCONFIG);
SR(CONTROL);
-#ifdef CONFIG_OMAP2_DSS_SDI
- SR(SDI_CONTROL);
- SR(PLL_CONTROL);
-#endif
+ if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
+ OMAP_DISPLAY_TYPE_SDI) {
+ SR(SDI_CONTROL);
+ SR(PLL_CONTROL);
+ }
}
void dss_restore_context(void)
@@ -113,10 +133,11 @@ void dss_restore_context(void)
RR(SYSCONFIG);
RR(CONTROL);
-#ifdef CONFIG_OMAP2_DSS_SDI
- RR(SDI_CONTROL);
- RR(PLL_CONTROL);
-#endif
+ if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
+ OMAP_DISPLAY_TYPE_SDI) {
+ RR(SDI_CONTROL);
+ RR(PLL_CONTROL);
+ }
}
#undef SR
@@ -209,66 +230,96 @@ void dss_sdi_disable(void)
REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
}
+const char *dss_get_generic_clk_source_name(enum dss_clk_source clk_src)
+{
+ return dss_generic_clk_source_names[clk_src];
+}
+
void dss_dump_clocks(struct seq_file *s)
{
unsigned long dpll4_ck_rate;
unsigned long dpll4_m4_ck_rate;
+ const char *fclk_name, *fclk_real_name;
+ unsigned long fclk_rate;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
-
- dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
- dpll4_m4_ck_rate = clk_get_rate(dss.dpll4_m4_ck);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
seq_printf(s, "- DSS -\n");
- seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
+ fclk_name = dss_get_generic_clk_source_name(DSS_CLK_SRC_FCK);
+ fclk_real_name = dss_feat_get_clk_source_name(DSS_CLK_SRC_FCK);
+ fclk_rate = dss_clk_get_rate(DSS_CLK_FCK);
- if (cpu_is_omap3630())
- seq_printf(s, "dss1_alwon_fclk = %lu / %lu = %lu\n",
- dpll4_ck_rate,
- dpll4_ck_rate / dpll4_m4_ck_rate,
- dss_clk_get_rate(DSS_CLK_FCK1));
- else
- seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
- dpll4_ck_rate,
- dpll4_ck_rate / dpll4_m4_ck_rate,
- dss_clk_get_rate(DSS_CLK_FCK1));
+ if (dss.dpll4_m4_ck) {
+ dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+ dpll4_m4_ck_rate = clk_get_rate(dss.dpll4_m4_ck);
+
+ seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ if (cpu_is_omap3630() || cpu_is_omap44xx())
+ seq_printf(s, "%s (%s) = %lu / %lu = %lu\n",
+ fclk_name, fclk_real_name,
+ dpll4_ck_rate,
+ dpll4_ck_rate / dpll4_m4_ck_rate,
+ fclk_rate);
+ else
+ seq_printf(s, "%s (%s) = %lu / %lu * 2 = %lu\n",
+ fclk_name, fclk_real_name,
+ dpll4_ck_rate,
+ dpll4_ck_rate / dpll4_m4_ck_rate,
+ fclk_rate);
+ } else {
+ seq_printf(s, "%s (%s) = %lu\n",
+ fclk_name, fclk_real_name,
+ fclk_rate);
+ }
+
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
}
void dss_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
DUMPREG(DSS_REVISION);
DUMPREG(DSS_SYSCONFIG);
DUMPREG(DSS_SYSSTATUS);
DUMPREG(DSS_IRQSTATUS);
DUMPREG(DSS_CONTROL);
- DUMPREG(DSS_SDI_CONTROL);
- DUMPREG(DSS_PLL_CONTROL);
- DUMPREG(DSS_SDI_STATUS);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
+ OMAP_DISPLAY_TYPE_SDI) {
+ DUMPREG(DSS_SDI_CONTROL);
+ DUMPREG(DSS_PLL_CONTROL);
+ DUMPREG(DSS_SDI_STATUS);
+ }
+
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
#undef DUMPREG
}
void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
{
int b;
+ u8 start, end;
+
+ switch (clk_src) {
+ case DSS_CLK_SRC_FCK:
+ b = 0;
+ break;
+ case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
+ b = 1;
+ dsi_wait_pll_hsdiv_dispc_active();
+ break;
+ default:
+ BUG();
+ }
- BUG_ON(clk_src != DSS_SRC_DSI1_PLL_FCLK &&
- clk_src != DSS_SRC_DSS1_ALWON_FCLK);
-
- b = clk_src == DSS_SRC_DSS1_ALWON_FCLK ? 0 : 1;
-
- if (clk_src == DSS_SRC_DSI1_PLL_FCLK)
- dsi_wait_dsi1_pll_active();
+ dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end);
- REG_FLD_MOD(DSS_CONTROL, b, 0, 0); /* DISPC_CLK_SWITCH */
+ REG_FLD_MOD(DSS_CONTROL, b, start, end); /* DISPC_CLK_SWITCH */
dss.dispc_clk_source = clk_src;
}
@@ -277,19 +328,51 @@ void dss_select_dsi_clk_source(enum dss_clk_source clk_src)
{
int b;
- BUG_ON(clk_src != DSS_SRC_DSI2_PLL_FCLK &&
- clk_src != DSS_SRC_DSS1_ALWON_FCLK);
-
- b = clk_src == DSS_SRC_DSS1_ALWON_FCLK ? 0 : 1;
-
- if (clk_src == DSS_SRC_DSI2_PLL_FCLK)
- dsi_wait_dsi2_pll_active();
+ switch (clk_src) {
+ case DSS_CLK_SRC_FCK:
+ b = 0;
+ break;
+ case DSS_CLK_SRC_DSI_PLL_HSDIV_DSI:
+ b = 1;
+ dsi_wait_pll_hsdiv_dsi_active();
+ break;
+ default:
+ BUG();
+ }
REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */
dss.dsi_clk_source = clk_src;
}
+void dss_select_lcd_clk_source(enum omap_channel channel,
+ enum dss_clk_source clk_src)
+{
+ int b, ix, pos;
+
+ if (!dss_has_feature(FEAT_LCD_CLK_SRC))
+ return;
+
+ switch (clk_src) {
+ case DSS_CLK_SRC_FCK:
+ b = 0;
+ break;
+ case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
+ BUG_ON(channel != OMAP_DSS_CHANNEL_LCD);
+ b = 1;
+ dsi_wait_pll_hsdiv_dispc_active();
+ break;
+ default:
+ BUG();
+ }
+
+ pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12;
+ REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */
+
+ ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1;
+ dss.lcd_clk_source[ix] = clk_src;
+}
+
enum dss_clk_source dss_get_dispc_clk_source(void)
{
return dss.dispc_clk_source;
@@ -300,34 +383,52 @@ enum dss_clk_source dss_get_dsi_clk_source(void)
return dss.dsi_clk_source;
}
+enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
+{
+ int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1;
+ return dss.lcd_clk_source[ix];
+}
+
/* calculate clock rates using dividers in cinfo */
int dss_calc_clock_rates(struct dss_clock_info *cinfo)
{
- unsigned long prate;
+ if (dss.dpll4_m4_ck) {
+ unsigned long prate;
+ u16 fck_div_max = 16;
- if (cinfo->fck_div > (cpu_is_omap3630() ? 32 : 16) ||
- cinfo->fck_div == 0)
- return -EINVAL;
+ if (cpu_is_omap3630() || cpu_is_omap44xx())
+ fck_div_max = 32;
+
+ if (cinfo->fck_div > fck_div_max || cinfo->fck_div == 0)
+ return -EINVAL;
- prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+ prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
- cinfo->fck = prate / cinfo->fck_div;
+ cinfo->fck = prate / cinfo->fck_div;
+ } else {
+ if (cinfo->fck_div != 0)
+ return -EINVAL;
+ cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK);
+ }
return 0;
}
int dss_set_clock_div(struct dss_clock_info *cinfo)
{
- unsigned long prate;
- int r;
+ if (dss.dpll4_m4_ck) {
+ unsigned long prate;
+ int r;
- if (cpu_is_omap34xx()) {
prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
DSSDBG("dpll4_m4 = %ld\n", prate);
r = clk_set_rate(dss.dpll4_m4_ck, prate / cinfo->fck_div);
if (r)
return r;
+ } else {
+ if (cinfo->fck_div != 0)
+ return -EINVAL;
}
DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);
@@ -337,12 +438,14 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
int dss_get_clock_div(struct dss_clock_info *cinfo)
{
- cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK1);
+ cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK);
- if (cpu_is_omap34xx()) {
+ if (dss.dpll4_m4_ck) {
unsigned long prate;
+
prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
- if (cpu_is_omap3630())
+
+ if (cpu_is_omap3630() || cpu_is_omap44xx())
cinfo->fck_div = prate / (cinfo->fck);
else
cinfo->fck_div = prate / (cinfo->fck / 2);
@@ -355,7 +458,7 @@ int dss_get_clock_div(struct dss_clock_info *cinfo)
unsigned long dss_get_dpll4_rate(void)
{
- if (cpu_is_omap34xx())
+ if (dss.dpll4_m4_ck)
return clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
else
return 0;
@@ -369,16 +472,18 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
struct dss_clock_info best_dss;
struct dispc_clock_info best_dispc;
- unsigned long fck;
+ unsigned long fck, max_dss_fck;
- u16 fck_div;
+ u16 fck_div, fck_div_max = 16;
int match = 0;
int min_fck_per_pck;
prate = dss_get_dpll4_rate();
- fck = dss_clk_get_rate(DSS_CLK_FCK1);
+ max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+
+ fck = dss_clk_get_rate(DSS_CLK_FCK);
if (req_pck == dss.cache_req_pck &&
((cpu_is_omap34xx() && prate == dss.cache_prate) ||
dss.cache_dss_cinfo.fck == fck)) {
@@ -391,7 +496,7 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
if (min_fck_per_pck &&
- req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
+ req_pck * min_fck_per_pck > max_dss_fck) {
DSSERR("Requested pixel clock not possible with the current "
"OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
"the constraint off.\n");
@@ -402,10 +507,10 @@ retry:
memset(&best_dss, 0, sizeof(best_dss));
memset(&best_dispc, 0, sizeof(best_dispc));
- if (cpu_is_omap24xx()) {
+ if (dss.dpll4_m4_ck == NULL) {
struct dispc_clock_info cur_dispc;
/* XXX can we change the clock on omap2? */
- fck = dss_clk_get_rate(DSS_CLK_FCK1);
+ fck = dss_clk_get_rate(DSS_CLK_FCK);
fck_div = 1;
dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
@@ -417,17 +522,19 @@ retry:
best_dispc = cur_dispc;
goto found;
- } else if (cpu_is_omap34xx()) {
- for (fck_div = (cpu_is_omap3630() ? 32 : 16);
- fck_div > 0; --fck_div) {
+ } else {
+ if (cpu_is_omap3630() || cpu_is_omap44xx())
+ fck_div_max = 32;
+
+ for (fck_div = fck_div_max; fck_div > 0; --fck_div) {
struct dispc_clock_info cur_dispc;
- if (cpu_is_omap3630())
+ if (fck_div_max == 32)
fck = prate / fck_div;
else
fck = prate / fck_div * 2;
- if (fck > DISPC_MAX_FCK)
+ if (fck > max_dss_fck)
continue;
if (min_fck_per_pck &&
@@ -450,8 +557,6 @@ retry:
goto found;
}
}
- } else {
- BUG();
}
found:
@@ -482,31 +587,6 @@ found:
return 0;
}
-
-
-static irqreturn_t dss_irq_handler_omap2(int irq, void *arg)
-{
- dispc_irq_handler();
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t dss_irq_handler_omap3(int irq, void *arg)
-{
- u32 irqstatus;
-
- irqstatus = dss_read_reg(DSS_IRQSTATUS);
-
- if (irqstatus & (1<<0)) /* DISPC_IRQ */
- dispc_irq_handler();
-#ifdef CONFIG_OMAP2_DSS_DSI
- if (irqstatus & (1<<1)) /* DSI_IRQ */
- dsi_irq_handler();
-#endif
-
- return IRQ_HANDLED;
-}
-
static int _omap_dss_wait_reset(void)
{
int t = 0;
@@ -549,34 +629,45 @@ void dss_set_dac_pwrdn_bgz(bool enable)
REG_FLD_MOD(DSS_CONTROL, enable, 5, 5); /* DAC Power-Down Control */
}
-int dss_init(bool skip_init)
+void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select hdmi)
+{
+ REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* VENC_HDMI_SWITCH */
+}
+
+static int dss_init(void)
{
int r;
u32 rev;
+ struct resource *dss_mem;
+ struct clk *dpll4_m4_ck;
- dss.base = ioremap(DSS_BASE, DSS_SZ_REGS);
+ dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
+ if (!dss_mem) {
+ DSSERR("can't get IORESOURCE_MEM DSS\n");
+ r = -EINVAL;
+ goto fail0;
+ }
+ dss.base = ioremap(dss_mem->start, resource_size(dss_mem));
if (!dss.base) {
DSSERR("can't ioremap DSS\n");
r = -ENOMEM;
goto fail0;
}
- if (!skip_init) {
- /* disable LCD and DIGIT output. This seems to fix the synclost
- * problem that we get, if the bootloader starts the DSS and
- * the kernel resets it */
- omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
+ /* disable LCD and DIGIT output. This seems to fix the synclost
+ * problem that we get if the bootloader starts the DSS and
+ * the kernel resets it */
+ omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
- /* We need to wait here a bit, otherwise we sometimes start to
- * get synclost errors, and after that only power cycle will
- * restore DSS functionality. I have no idea why this happens.
- * And we have to wait _before_ resetting the DSS, but after
- * enabling clocks.
- */
- msleep(50);
+ /* We need to wait here a bit, otherwise we sometimes start to
+ * get synclost errors, and after that only a power cycle will
+ * restore DSS functionality. I have no idea why this happens.
+ * And we have to wait _before_ resetting the DSS, but after
+ * enabling clocks.
+ */
+ msleep(50);
- _omap_dss_reset();
- }
+ _omap_dss_reset();
/* autoidle */
REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0);
@@ -589,29 +680,30 @@ int dss_init(bool skip_init)
REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
#endif
-
- r = request_irq(INT_24XX_DSS_IRQ,
- cpu_is_omap24xx()
- ? dss_irq_handler_omap2
- : dss_irq_handler_omap3,
- 0, "OMAP DSS", NULL);
-
- if (r < 0) {
- DSSERR("omap2 dss: request_irq failed\n");
- goto fail1;
- }
-
if (cpu_is_omap34xx()) {
- dss.dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
- if (IS_ERR(dss.dpll4_m4_ck)) {
+ dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
+ if (IS_ERR(dpll4_m4_ck)) {
DSSERR("Failed to get dpll4_m4_ck\n");
- r = PTR_ERR(dss.dpll4_m4_ck);
- goto fail2;
+ r = PTR_ERR(dpll4_m4_ck);
+ goto fail1;
}
+ } else if (cpu_is_omap44xx()) {
+ dpll4_m4_ck = clk_get(NULL, "dpll_per_m5x2_ck");
+ if (IS_ERR(dpll4_m4_ck)) {
+ DSSERR("Failed to get dpll4_m4_ck\n");
+ r = PTR_ERR(dpll4_m4_ck);
+ goto fail1;
+ }
+ } else { /* omap24xx */
+ dpll4_m4_ck = NULL;
}
- dss.dsi_clk_source = DSS_SRC_DSS1_ALWON_FCLK;
- dss.dispc_clk_source = DSS_SRC_DSS1_ALWON_FCLK;
+ dss.dpll4_m4_ck = dpll4_m4_ck;
+
+ dss.dsi_clk_source = DSS_CLK_SRC_FCK;
+ dss.dispc_clk_source = DSS_CLK_SRC_FCK;
+ dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
+ dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
dss_save_context();
@@ -621,21 +713,416 @@ int dss_init(bool skip_init)
return 0;
-fail2:
- free_irq(INT_24XX_DSS_IRQ, NULL);
fail1:
iounmap(dss.base);
fail0:
return r;
}
-void dss_exit(void)
+static void dss_exit(void)
{
- if (cpu_is_omap34xx())
+ if (dss.dpll4_m4_ck)
clk_put(dss.dpll4_m4_ck);
- free_irq(INT_24XX_DSS_IRQ, NULL);
-
iounmap(dss.base);
}
+/* CONTEXT */
+static int dss_get_ctx_id(void)
+{
+ struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
+ int r;
+
+ if (!pdata->board_data->get_last_off_on_transaction_id)
+ return 0;
+ r = pdata->board_data->get_last_off_on_transaction_id(&dss.pdev->dev);
+ if (r < 0) {
+ dev_err(&dss.pdev->dev, "getting transaction ID failed, "
+ "will force context restore\n");
+ r = -1;
+ }
+ return r;
+}
+
+int dss_need_ctx_restore(void)
+{
+ int id = dss_get_ctx_id();
+
+ if (id < 0 || id != dss.ctx_id) {
+ DSSDBG("ctx id %d -> id %d\n",
+ dss.ctx_id, id);
+ dss.ctx_id = id;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static void save_all_ctx(void)
+{
+ DSSDBG("save context\n");
+
+ dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
+
+ dss_save_context();
+ dispc_save_context();
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_save_context();
+#endif
+
+ dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
+}
+
+static void restore_all_ctx(void)
+{
+ DSSDBG("restore context\n");
+
+ dss_clk_enable_all_no_ctx();
+
+ dss_restore_context();
+ dispc_restore_context();
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_restore_context();
+#endif
+
+ dss_clk_disable_all_no_ctx();
+}
+
+static int dss_get_clock(struct clk **clock, const char *clk_name)
+{
+ struct clk *clk;
+
+ clk = clk_get(&dss.pdev->dev, clk_name);
+
+ if (IS_ERR(clk)) {
+ DSSERR("can't get clock %s\n", clk_name);
+ return PTR_ERR(clk);
+ }
+
+ *clock = clk;
+
+ DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));
+
+ return 0;
+}
+
+static int dss_get_clocks(void)
+{
+ int r;
+ struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
+
+ dss.dss_ick = NULL;
+ dss.dss_fck = NULL;
+ dss.dss_sys_clk = NULL;
+ dss.dss_tv_fck = NULL;
+ dss.dss_video_fck = NULL;
+
+ r = dss_get_clock(&dss.dss_ick, "ick");
+ if (r)
+ goto err;
+
+ r = dss_get_clock(&dss.dss_fck, "fck");
+ if (r)
+ goto err;
+
+ if (!pdata->opt_clock_available) {
+ r = -ENODEV;
+ goto err;
+ }
+
+ if (pdata->opt_clock_available("sys_clk")) {
+ r = dss_get_clock(&dss.dss_sys_clk, "sys_clk");
+ if (r)
+ goto err;
+ }
+
+ if (pdata->opt_clock_available("tv_clk")) {
+ r = dss_get_clock(&dss.dss_tv_fck, "tv_clk");
+ if (r)
+ goto err;
+ }
+
+ if (pdata->opt_clock_available("video_clk")) {
+ r = dss_get_clock(&dss.dss_video_fck, "video_clk");
+ if (r)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (dss.dss_ick)
+ clk_put(dss.dss_ick);
+ if (dss.dss_fck)
+ clk_put(dss.dss_fck);
+ if (dss.dss_sys_clk)
+ clk_put(dss.dss_sys_clk);
+ if (dss.dss_tv_fck)
+ clk_put(dss.dss_tv_fck);
+ if (dss.dss_video_fck)
+ clk_put(dss.dss_video_fck);
+
+ return r;
+}
+
+static void dss_put_clocks(void)
+{
+ if (dss.dss_video_fck)
+ clk_put(dss.dss_video_fck);
+ if (dss.dss_tv_fck)
+ clk_put(dss.dss_tv_fck);
+ if (dss.dss_sys_clk)
+ clk_put(dss.dss_sys_clk);
+ clk_put(dss.dss_fck);
+ clk_put(dss.dss_ick);
+}
+
+unsigned long dss_clk_get_rate(enum dss_clock clk)
+{
+ switch (clk) {
+ case DSS_CLK_ICK:
+ return clk_get_rate(dss.dss_ick);
+ case DSS_CLK_FCK:
+ return clk_get_rate(dss.dss_fck);
+ case DSS_CLK_SYSCK:
+ return clk_get_rate(dss.dss_sys_clk);
+ case DSS_CLK_TVFCK:
+ return clk_get_rate(dss.dss_tv_fck);
+ case DSS_CLK_VIDFCK:
+ return clk_get_rate(dss.dss_video_fck);
+ }
+
+ BUG();
+ return 0;
+}
+
+static unsigned count_clk_bits(enum dss_clock clks)
+{
+ unsigned num_clks = 0;
+
+ if (clks & DSS_CLK_ICK)
+ ++num_clks;
+ if (clks & DSS_CLK_FCK)
+ ++num_clks;
+ if (clks & DSS_CLK_SYSCK)
+ ++num_clks;
+ if (clks & DSS_CLK_TVFCK)
+ ++num_clks;
+ if (clks & DSS_CLK_VIDFCK)
+ ++num_clks;
+
+ return num_clks;
+}
+
+static void dss_clk_enable_no_ctx(enum dss_clock clks)
+{
+ unsigned num_clks = count_clk_bits(clks);
+
+ if (clks & DSS_CLK_ICK)
+ clk_enable(dss.dss_ick);
+ if (clks & DSS_CLK_FCK)
+ clk_enable(dss.dss_fck);
+ if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk)
+ clk_enable(dss.dss_sys_clk);
+ if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck)
+ clk_enable(dss.dss_tv_fck);
+ if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck)
+ clk_enable(dss.dss_video_fck);
+
+ dss.num_clks_enabled += num_clks;
+}
+
+void dss_clk_enable(enum dss_clock clks)
+{
+ bool check_ctx = dss.num_clks_enabled == 0;
+
+ dss_clk_enable_no_ctx(clks);
+
+ /*
+ * HACK: On omap4 the registers may not be accessible right after
+ * enabling the clocks. At some point this will be handled by
+ * pm_runtime, but for the time being this should make things work.
+ */
+ if (cpu_is_omap44xx() && check_ctx)
+ udelay(10);
+
+ if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore())
+ restore_all_ctx();
+}
+
+static void dss_clk_disable_no_ctx(enum dss_clock clks)
+{
+ unsigned num_clks = count_clk_bits(clks);
+
+ if (clks & DSS_CLK_ICK)
+ clk_disable(dss.dss_ick);
+ if (clks & DSS_CLK_FCK)
+ clk_disable(dss.dss_fck);
+ if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk)
+ clk_disable(dss.dss_sys_clk);
+ if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck)
+ clk_disable(dss.dss_tv_fck);
+ if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck)
+ clk_disable(dss.dss_video_fck);
+
+ dss.num_clks_enabled -= num_clks;
+}
+
+void dss_clk_disable(enum dss_clock clks)
+{
+ if (cpu_is_omap34xx()) {
+ unsigned num_clks = count_clk_bits(clks);
+
+ BUG_ON(dss.num_clks_enabled < num_clks);
+
+ if (dss.num_clks_enabled == num_clks)
+ save_all_ctx();
+ }
+
+ dss_clk_disable_no_ctx(clks);
+}
+
+static void dss_clk_enable_all_no_ctx(void)
+{
+ enum dss_clock clks;
+
+ clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
+ if (cpu_is_omap34xx())
+ clks |= DSS_CLK_VIDFCK;
+ dss_clk_enable_no_ctx(clks);
+}
+
+static void dss_clk_disable_all_no_ctx(void)
+{
+ enum dss_clock clks;
+
+ clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
+ if (cpu_is_omap34xx())
+ clks |= DSS_CLK_VIDFCK;
+ dss_clk_disable_no_ctx(clks);
+}
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+/* CLOCKS */
+static void core_dump_clocks(struct seq_file *s)
+{
+ int i;
+ struct clk *clocks[5] = {
+ dss.dss_ick,
+ dss.dss_fck,
+ dss.dss_sys_clk,
+ dss.dss_tv_fck,
+ dss.dss_video_fck
+ };
+
+ seq_printf(s, "- CORE -\n");
+
+ seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled);
+
+ for (i = 0; i < 5; i++) {
+ if (!clocks[i])
+ continue;
+ seq_printf(s, "%-15s\t%lu\t%d\n",
+ clocks[i]->name,
+ clk_get_rate(clocks[i]),
+ clocks[i]->usecount);
+ }
+}
+#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */
+
+/* DEBUGFS */
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+void dss_debug_dump_clocks(struct seq_file *s)
+{
+ core_dump_clocks(s);
+ dss_dump_clocks(s);
+ dispc_dump_clocks(s);
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_dump_clocks(s);
+#endif
+}
+#endif
+
+
+/* DSS HW IP initialisation */
+static int omap_dsshw_probe(struct platform_device *pdev)
+{
+ int r;
+
+ dss.pdev = pdev;
+
+ r = dss_get_clocks();
+ if (r)
+ goto err_clocks;
+
+ dss_clk_enable_all_no_ctx();
+
+ dss.ctx_id = dss_get_ctx_id();
+ DSSDBG("initial ctx id %d\n", dss.ctx_id);
+
+ r = dss_init();
+ if (r) {
+ DSSERR("Failed to initialize DSS\n");
+ goto err_dss;
+ }
+
+ r = dpi_init();
+ if (r) {
+ DSSERR("Failed to initialize DPI\n");
+ goto err_dpi;
+ }
+
+ r = sdi_init();
+ if (r) {
+ DSSERR("Failed to initialize SDI\n");
+ goto err_sdi;
+ }
+
+ dss_clk_disable_all_no_ctx();
+ return 0;
+err_sdi:
+ dpi_exit();
+err_dpi:
+ dss_exit();
+err_dss:
+ dss_clk_disable_all_no_ctx();
+ dss_put_clocks();
+err_clocks:
+ return r;
+}
+
+static int omap_dsshw_remove(struct platform_device *pdev)
+{
+
+ dss_exit();
+
+ /*
+ * As part of the hwmod changes, DSS is not the only controller of the dss
+ * clocks; the hwmod framework itself also enables them during hwmod init
+ * for DSS, and autoidle is set in h/w for DSS. Hence there is no need to
+ * disable clocks whose usecounts are > 1.
+ */
+ WARN_ON(dss.num_clks_enabled > 0);
+
+ dss_put_clocks();
+ return 0;
+}
+
+static struct platform_driver omap_dsshw_driver = {
+ .probe = omap_dsshw_probe,
+ .remove = omap_dsshw_remove,
+ .driver = {
+ .name = "omapdss_dss",
+ .owner = THIS_MODULE,
+ },
+};
+
+int dss_init_platform_driver(void)
+{
+ return platform_driver_register(&omap_dsshw_driver);
+}
+
+void dss_uninit_platform_driver(void)
+{
+ platform_driver_unregister(&omap_dsshw_driver);
+}
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index b394951120a..c2f582bb19c 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -97,8 +97,6 @@ extern unsigned int dss_debug;
#define FLD_MOD(orig, val, start, end) \
(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
-#define DISPC_MAX_FCK 173000000
-
enum omap_burst_size {
OMAP_DSS_BURST_4x32 = 0,
OMAP_DSS_BURST_8x32 = 1,
@@ -112,17 +110,25 @@ enum omap_parallel_interface_mode {
};
enum dss_clock {
- DSS_CLK_ICK = 1 << 0,
- DSS_CLK_FCK1 = 1 << 1,
- DSS_CLK_FCK2 = 1 << 2,
- DSS_CLK_54M = 1 << 3,
- DSS_CLK_96M = 1 << 4,
+ DSS_CLK_ICK = 1 << 0, /* DSS_L3_ICLK and DSS_L4_ICLK */
+ DSS_CLK_FCK = 1 << 1, /* DSS1_ALWON_FCLK */
+ DSS_CLK_SYSCK = 1 << 2, /* DSS2_ALWON_FCLK */
+ DSS_CLK_TVFCK = 1 << 3, /* DSS_TV_FCLK */
+ DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/
};
enum dss_clk_source {
- DSS_SRC_DSI1_PLL_FCLK,
- DSS_SRC_DSI2_PLL_FCLK,
- DSS_SRC_DSS1_ALWON_FCLK,
+ DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
+ * OMAP4: PLL1_CLK1 */
+ DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
+ * OMAP4: PLL1_CLK2 */
+ DSS_CLK_SRC_FCK, /* OMAP2/3: DSS1_ALWON_FCLK
+ * OMAP4: DSS_FCLK */
+};
+
+enum dss_hdmi_venc_clk_source_select {
+ DSS_VENC_TV_CLK = 0,
+ DSS_HDMI_M_PCLK = 1,
};
struct dss_clock_info {
@@ -148,36 +154,42 @@ struct dsi_clock_info {
unsigned long fint;
unsigned long clkin4ddr;
unsigned long clkin;
- unsigned long dsi1_pll_fclk;
- unsigned long dsi2_pll_fclk;
-
+ unsigned long dsi_pll_hsdiv_dispc_clk; /* OMAP3: DSI1_PLL_CLK
+ * OMAP4: PLLx_CLK1 */
+ unsigned long dsi_pll_hsdiv_dsi_clk; /* OMAP3: DSI2_PLL_CLK
+ * OMAP4: PLLx_CLK2 */
unsigned long lp_clk;
/* dividers */
u16 regn;
u16 regm;
- u16 regm3;
- u16 regm4;
-
+ u16 regm_dispc; /* OMAP3: REGM3
+ * OMAP4: REGM4 */
+ u16 regm_dsi; /* OMAP3: REGM4
+ * OMAP4: REGM5 */
u16 lp_clk_div;
u8 highfreq;
- bool use_dss2_fck;
+ bool use_sys_clk;
+};
+
+/* HDMI PLL structure */
+struct hdmi_pll_info {
+ u16 regn;
+ u16 regm;
+ u32 regmf;
+ u16 regm2;
+ u16 regsd;
+ u16 dcofreq;
};
struct seq_file;
struct platform_device;
/* core */
-void dss_clk_enable(enum dss_clock clks);
-void dss_clk_disable(enum dss_clock clks);
-unsigned long dss_clk_get_rate(enum dss_clock clk);
-int dss_need_ctx_restore(void);
-void dss_dump_clocks(struct seq_file *s);
struct bus_type *dss_get_bus(void);
struct regulator *dss_get_vdds_dsi(void);
struct regulator *dss_get_vdds_sdi(void);
-struct regulator *dss_get_vdda_dac(void);
/* display */
int dss_suspend_all_devices(void);
@@ -214,13 +226,23 @@ void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr);
void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
/* DSS */
-int dss_init(bool skip_init);
-void dss_exit(void);
+int dss_init_platform_driver(void);
+void dss_uninit_platform_driver(void);
+void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
void dss_save_context(void);
void dss_restore_context(void);
+void dss_clk_enable(enum dss_clock clks);
+void dss_clk_disable(enum dss_clock clks);
+unsigned long dss_clk_get_rate(enum dss_clock clk);
+int dss_need_ctx_restore(void);
+const char *dss_get_generic_clk_source_name(enum dss_clk_source clk_src);
+void dss_dump_clocks(struct seq_file *s);
void dss_dump_regs(struct seq_file *s);
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+void dss_debug_dump_clocks(struct seq_file *s);
+#endif
void dss_sdi_init(u8 datapairs);
int dss_sdi_enable(void);
@@ -228,8 +250,11 @@ void dss_sdi_disable(void);
void dss_select_dispc_clk_source(enum dss_clk_source clk_src);
void dss_select_dsi_clk_source(enum dss_clk_source clk_src);
+void dss_select_lcd_clk_source(enum omap_channel channel,
+ enum dss_clk_source clk_src);
enum dss_clk_source dss_get_dispc_clk_source(void);
enum dss_clk_source dss_get_dsi_clk_source(void);
+enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
void dss_set_venc_output(enum omap_dss_venc_type type);
void dss_set_dac_pwrdn_bgz(bool enable);
@@ -244,11 +269,11 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
/* SDI */
#ifdef CONFIG_OMAP2_DSS_SDI
-int sdi_init(bool skip_init);
+int sdi_init(void);
void sdi_exit(void);
int sdi_init_display(struct omap_dss_device *display);
#else
-static inline int sdi_init(bool skip_init)
+static inline int sdi_init(void)
{
return 0;
}
@@ -259,8 +284,8 @@ static inline void sdi_exit(void)
/* DSI */
#ifdef CONFIG_OMAP2_DSS_DSI
-int dsi_init(struct platform_device *pdev);
-void dsi_exit(void);
+int dsi_init_platform_driver(void);
+void dsi_uninit_platform_driver(void);
void dsi_dump_clocks(struct seq_file *s);
void dsi_dump_irqs(struct seq_file *s);
@@ -271,7 +296,7 @@ void dsi_restore_context(void);
int dsi_init_display(struct omap_dss_device *display);
void dsi_irq_handler(void);
-unsigned long dsi_get_dsi1_pll_rate(void);
+unsigned long dsi_get_pll_hsdiv_dispc_rate(void);
int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo);
int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
struct dsi_clock_info *cinfo,
@@ -282,31 +307,36 @@ void dsi_pll_uninit(void);
void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
u32 fifo_size, enum omap_burst_size *burst_size,
u32 *fifo_low, u32 *fifo_high);
-void dsi_wait_dsi1_pll_active(void);
-void dsi_wait_dsi2_pll_active(void);
+void dsi_wait_pll_hsdiv_dispc_active(void);
+void dsi_wait_pll_hsdiv_dsi_active(void);
#else
-static inline int dsi_init(struct platform_device *pdev)
+static inline int dsi_init_platform_driver(void)
{
return 0;
}
-static inline void dsi_exit(void)
+static inline void dsi_uninit_platform_driver(void)
{
}
-static inline void dsi_wait_dsi1_pll_active(void)
+static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(void)
{
+ WARN("%s: DSI not compiled in, returning rate as 0\n", __func__);
+ return 0;
}
-static inline void dsi_wait_dsi2_pll_active(void)
+static inline void dsi_wait_pll_hsdiv_dispc_active(void)
+{
+}
+static inline void dsi_wait_pll_hsdiv_dsi_active(void)
{
}
#endif
/* DPI */
#ifdef CONFIG_OMAP2_DSS_DPI
-int dpi_init(struct platform_device *pdev);
+int dpi_init(void);
void dpi_exit(void);
int dpi_init_display(struct omap_dss_device *dssdev);
#else
-static inline int dpi_init(struct platform_device *pdev)
+static inline int dpi_init(void)
{
return 0;
}
@@ -316,8 +346,8 @@ static inline void dpi_exit(void)
#endif
/* DISPC */
-int dispc_init(void);
-void dispc_exit(void);
+int dispc_init_platform_driver(void);
+void dispc_uninit_platform_driver(void);
void dispc_dump_clocks(struct seq_file *s);
void dispc_dump_irqs(struct seq_file *s);
void dispc_dump_regs(struct seq_file *s);
@@ -350,6 +380,7 @@ void dispc_set_plane_size(enum omap_plane plane, u16 width, u16 height);
void dispc_set_channel_out(enum omap_plane plane,
enum omap_channel channel_out);
+void dispc_enable_gamma_table(bool enable);
int dispc_setup_plane(enum omap_plane plane,
u32 paddr, u16 screen_width,
u16 pos_x, u16 pos_y,
@@ -409,24 +440,50 @@ int dispc_get_clock_div(enum omap_channel channel,
/* VENC */
#ifdef CONFIG_OMAP2_DSS_VENC
-int venc_init(struct platform_device *pdev);
-void venc_exit(void);
+int venc_init_platform_driver(void);
+void venc_uninit_platform_driver(void);
void venc_dump_regs(struct seq_file *s);
int venc_init_display(struct omap_dss_device *display);
#else
-static inline int venc_init(struct platform_device *pdev)
+static inline int venc_init_platform_driver(void)
+{
+ return 0;
+}
+static inline void venc_uninit_platform_driver(void)
+{
+}
+#endif
+
+/* HDMI */
+#ifdef CONFIG_OMAP4_DSS_HDMI
+int hdmi_init_platform_driver(void);
+void hdmi_uninit_platform_driver(void);
+int hdmi_init_display(struct omap_dss_device *dssdev);
+#else
+static inline int hdmi_init_display(struct omap_dss_device *dssdev)
+{
+ return 0;
+}
+static inline int hdmi_init_platform_driver(void)
{
return 0;
}
-static inline void venc_exit(void)
+static inline void hdmi_uninit_platform_driver(void)
{
}
#endif
+int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev);
+void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev);
+void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev);
+int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+int hdmi_panel_init(void);
+void hdmi_panel_exit(void);
/* RFBI */
#ifdef CONFIG_OMAP2_DSS_RFBI
-int rfbi_init(void);
-void rfbi_exit(void);
+int rfbi_init_platform_driver(void);
+void rfbi_uninit_platform_driver(void);
void rfbi_dump_regs(struct seq_file *s);
int rfbi_configure(int rfbi_module, int bpp, int lines);
@@ -437,11 +494,11 @@ void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
unsigned long rfbi_get_max_tx_rate(void);
int rfbi_init_display(struct omap_dss_device *display);
#else
-static inline int rfbi_init(void)
+static inline int rfbi_init_platform_driver(void)
{
return 0;
}
-static inline void rfbi_exit(void)
+static inline void rfbi_uninit_platform_driver(void)
{
}
#endif
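
The tables added below are what the dss_feat_*() and dss_has_feature() calls used throughout this patch resolve against: a boolean capability mask, per-SoC register bit-field positions, and per-SoC numeric parameter ranges. A minimal sketch of the lookup pattern follows, mirroring what dsi_pll_set_clock_div() and dss_select_dispc_clk_source() do above; the wrapper function itself is hypothetical and only illustrates how the three lookups replace hard-coded cpu_is_omap*() checks.

#include <linux/kernel.h>

#include "dss.h"
#include "dss_features.h"

/* Illustrative only: program DSI_PLL_REGN using feature-data lookups */
static int example_program_regn(const struct dsi_clock_info *cinfo, u32 *l)
{
    unsigned long regn_max;
    u8 regn_start, regn_end;

    /* numeric limit comes from the per-SoC parameter table */
    regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
    if (cinfo->regn == 0 || cinfo->regn > regn_max)
        return -EINVAL;

    /* bit positions come from the per-SoC register-field table */
    dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
    *l = FLD_MOD(*l, cinfo->regn - 1, regn_start, regn_end);

    /* optional hardware blocks hide behind a feature bit */
    if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
        /* FREQSEL would be programmed here, as dsi_pll_set_clock_div()
         * does in dsi.c */
    }

    return 0;
}
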
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index cf3ef696e14..b4f63d01180 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -25,14 +25,18 @@
#include <plat/display.h>
#include <plat/cpu.h>
+#include "dss.h"
#include "dss_features.h"
/* Defines a generic omap register field */
struct dss_reg_field {
- enum dss_feat_reg_field id;
u8 start, end;
};
+struct dss_param_range {
+ int min, max;
+};
+
struct omap_dss_features {
const struct dss_reg_field *reg_fields;
const int num_reg_fields;
@@ -43,29 +47,68 @@ struct omap_dss_features {
const int num_ovls;
const enum omap_display_type *supported_displays;
const enum omap_color_mode *supported_color_modes;
+ const char * const *clksrc_names;
+ const struct dss_param_range *dss_params;
};
/* This struct is assigned to one of the below during initialization */
static struct omap_dss_features *omap_current_dss_features;
static const struct dss_reg_field omap2_dss_reg_fields[] = {
- { FEAT_REG_FIRHINC, 11, 0 },
- { FEAT_REG_FIRVINC, 27, 16 },
- { FEAT_REG_FIFOLOWTHRESHOLD, 8, 0 },
- { FEAT_REG_FIFOHIGHTHRESHOLD, 24, 16 },
- { FEAT_REG_FIFOSIZE, 8, 0 },
+ [FEAT_REG_FIRHINC] = { 11, 0 },
+ [FEAT_REG_FIRVINC] = { 27, 16 },
+ [FEAT_REG_FIFOLOWTHRESHOLD] = { 8, 0 },
+ [FEAT_REG_FIFOHIGHTHRESHOLD] = { 24, 16 },
+ [FEAT_REG_FIFOSIZE] = { 8, 0 },
+ [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
+ [FEAT_REG_VERTICALACCU] = { 25, 16 },
+ [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
+ [FEAT_REG_DSIPLL_REGN] = { 0, 0 },
+ [FEAT_REG_DSIPLL_REGM] = { 0, 0 },
+ [FEAT_REG_DSIPLL_REGM_DISPC] = { 0, 0 },
+ [FEAT_REG_DSIPLL_REGM_DSI] = { 0, 0 },
};
static const struct dss_reg_field omap3_dss_reg_fields[] = {
- { FEAT_REG_FIRHINC, 12, 0 },
- { FEAT_REG_FIRVINC, 28, 16 },
- { FEAT_REG_FIFOLOWTHRESHOLD, 11, 0 },
- { FEAT_REG_FIFOHIGHTHRESHOLD, 27, 16 },
- { FEAT_REG_FIFOSIZE, 10, 0 },
+ [FEAT_REG_FIRHINC] = { 12, 0 },
+ [FEAT_REG_FIRVINC] = { 28, 16 },
+ [FEAT_REG_FIFOLOWTHRESHOLD] = { 11, 0 },
+ [FEAT_REG_FIFOHIGHTHRESHOLD] = { 27, 16 },
+ [FEAT_REG_FIFOSIZE] = { 10, 0 },
+ [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
+ [FEAT_REG_VERTICALACCU] = { 25, 16 },
+ [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
+ [FEAT_REG_DSIPLL_REGN] = { 7, 1 },
+ [FEAT_REG_DSIPLL_REGM] = { 18, 8 },
+ [FEAT_REG_DSIPLL_REGM_DISPC] = { 22, 19 },
+ [FEAT_REG_DSIPLL_REGM_DSI] = { 26, 23 },
+};
+
+static const struct dss_reg_field omap4_dss_reg_fields[] = {
+ [FEAT_REG_FIRHINC] = { 12, 0 },
+ [FEAT_REG_FIRVINC] = { 28, 16 },
+ [FEAT_REG_FIFOLOWTHRESHOLD] = { 15, 0 },
+ [FEAT_REG_FIFOHIGHTHRESHOLD] = { 31, 16 },
+ [FEAT_REG_FIFOSIZE] = { 15, 0 },
+ [FEAT_REG_HORIZONTALACCU] = { 10, 0 },
+ [FEAT_REG_VERTICALACCU] = { 26, 16 },
+ [FEAT_REG_DISPC_CLK_SWITCH] = { 9, 8 },
+ [FEAT_REG_DSIPLL_REGN] = { 8, 1 },
+ [FEAT_REG_DSIPLL_REGM] = { 20, 9 },
+ [FEAT_REG_DSIPLL_REGM_DISPC] = { 25, 21 },
+ [FEAT_REG_DSIPLL_REGM_DSI] = { 30, 26 },
};
static const enum omap_display_type omap2_dss_supported_displays[] = {
/* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DISPLAY_TYPE_VENC,
+};
+
+static const enum omap_display_type omap3430_dss_supported_displays[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI,
@@ -73,10 +116,10 @@ static const enum omap_display_type omap2_dss_supported_displays[] = {
OMAP_DISPLAY_TYPE_VENC,
};
-static const enum omap_display_type omap3_dss_supported_displays[] = {
+static const enum omap_display_type omap3630_dss_supported_displays[] = {
/* OMAP_DSS_CHANNEL_LCD */
OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
- OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI,
+ OMAP_DISPLAY_TYPE_DSI,
/* OMAP_DSS_CHANNEL_DIGIT */
OMAP_DISPLAY_TYPE_VENC,
@@ -87,7 +130,7 @@ static const enum omap_display_type omap4_dss_supported_displays[] = {
OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI,
/* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DISPLAY_TYPE_VENC,
+ OMAP_DISPLAY_TYPE_VENC | OMAP_DISPLAY_TYPE_HDMI,
/* OMAP_DSS_CHANNEL_LCD2 */
OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
@@ -122,9 +165,11 @@ static const enum omap_color_mode omap3_dss_supported_color_modes[] = {
OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
/* OMAP_DSS_VIDEO1 */
- OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P |
OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
- OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY,
+ OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P |
+ OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY |
+ OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBA32 |
+ OMAP_DSS_COLOR_RGBX32,
/* OMAP_DSS_VIDEO2 */
OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
@@ -134,6 +179,54 @@ static const enum omap_color_mode omap3_dss_supported_color_modes[] = {
OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
};
+static const char * const omap2_dss_clk_source_names[] = {
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A",
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A",
+ [DSS_CLK_SRC_FCK] = "DSS_FCLK1",
+};
+
+static const char * const omap3_dss_clk_source_names[] = {
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK",
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK",
+ [DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK",
+};
+
+static const char * const omap4_dss_clk_source_names[] = {
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1",
+ [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2",
+ [DSS_CLK_SRC_FCK] = "DSS_FCLK",
+};
+
+static const struct dss_param_range omap2_dss_param_range[] = {
+ [FEAT_PARAM_DSS_FCK] = { 0, 173000000 },
+ [FEAT_PARAM_DSIPLL_REGN] = { 0, 0 },
+ [FEAT_PARAM_DSIPLL_REGM] = { 0, 0 },
+ [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, 0 },
+ [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, 0 },
+ [FEAT_PARAM_DSIPLL_FINT] = { 0, 0 },
+ [FEAT_PARAM_DSIPLL_LPDIV] = { 0, 0 },
+};
+
+static const struct dss_param_range omap3_dss_param_range[] = {
+ [FEAT_PARAM_DSS_FCK] = { 0, 173000000 },
+ [FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 7) - 1 },
+ [FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 11) - 1 },
+ [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 4) - 1 },
+ [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 4) - 1 },
+ [FEAT_PARAM_DSIPLL_FINT] = { 750000, 2100000 },
+ [FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
+};
+
+static const struct dss_param_range omap4_dss_param_range[] = {
+ [FEAT_PARAM_DSS_FCK] = { 0, 186000000 },
+ [FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 8) - 1 },
+ [FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 12) - 1 },
+ [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 5) - 1 },
+ [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 5) - 1 },
+ [FEAT_PARAM_DSIPLL_FINT] = { 500000, 2500000 },
+ [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
+};
+
/* OMAP2 DSS Features */
static struct omap_dss_features omap2_dss_features = {
.reg_fields = omap2_dss_reg_fields,
@@ -141,12 +234,15 @@ static struct omap_dss_features omap2_dss_features = {
.has_feature =
FEAT_LCDENABLEPOL | FEAT_LCDENABLESIGNAL |
- FEAT_PCKFREEENABLE | FEAT_FUNCGATED,
+ FEAT_PCKFREEENABLE | FEAT_FUNCGATED |
+ FEAT_ROWREPEATENABLE | FEAT_RESIZECONF,
.num_mgrs = 2,
.num_ovls = 3,
.supported_displays = omap2_dss_supported_displays,
.supported_color_modes = omap2_dss_supported_color_modes,
+ .clksrc_names = omap2_dss_clk_source_names,
+ .dss_params = omap2_dss_param_range,
};
/* OMAP3 DSS Features */
@@ -157,12 +253,16 @@ static struct omap_dss_features omap3430_dss_features = {
.has_feature =
FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
- FEAT_FUNCGATED,
+ FEAT_FUNCGATED | FEAT_ROWREPEATENABLE |
+ FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF |
+ FEAT_DSI_PLL_FREQSEL | FEAT_DSI_LDO_STATUS,
.num_mgrs = 2,
.num_ovls = 3,
- .supported_displays = omap3_dss_supported_displays,
+ .supported_displays = omap3430_dss_supported_displays,
.supported_color_modes = omap3_dss_supported_color_modes,
+ .clksrc_names = omap3_dss_clk_source_names,
+ .dss_params = omap3_dss_param_range,
};
static struct omap_dss_features omap3630_dss_features = {
@@ -172,27 +272,36 @@ static struct omap_dss_features omap3630_dss_features = {
.has_feature =
FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
- FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED,
+ FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED |
+ FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT |
+ FEAT_RESIZECONF | FEAT_DSI_PLL_FREQSEL |
+ FEAT_DSI_LDO_STATUS,
.num_mgrs = 2,
.num_ovls = 3,
- .supported_displays = omap3_dss_supported_displays,
+ .supported_displays = omap3630_dss_supported_displays,
.supported_color_modes = omap3_dss_supported_color_modes,
+ .clksrc_names = omap3_dss_clk_source_names,
+ .dss_params = omap3_dss_param_range,
};
/* OMAP4 DSS Features */
static struct omap_dss_features omap4_dss_features = {
- .reg_fields = omap3_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
+ .reg_fields = omap4_dss_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),
.has_feature =
FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA |
- FEAT_MGR_LCD2,
+ FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
+ FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
+ FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH,
.num_mgrs = 3,
.num_ovls = 3,
.supported_displays = omap4_dss_supported_displays,
.supported_color_modes = omap3_dss_supported_color_modes,
+ .clksrc_names = omap4_dss_clk_source_names,
+ .dss_params = omap4_dss_param_range,
};
/* Functions returning values related to a DSS feature */
@@ -206,6 +315,16 @@ int dss_feat_get_num_ovls(void)
return omap_current_dss_features->num_ovls;
}
+unsigned long dss_feat_get_param_min(enum dss_range_param param)
+{
+ return omap_current_dss_features->dss_params[param].min;
+}
+
+unsigned long dss_feat_get_param_max(enum dss_range_param param)
+{
+ return omap_current_dss_features->dss_params[param].max;
+}
+
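+/*
+ * Example (illustrative only): a caller can use the two accessors above
+ * to validate a candidate DSI PLL divider against the per-SoC range,
+ * e.g.
+ *
+ *	if (regn < dss_feat_get_param_min(FEAT_PARAM_DSIPLL_REGN) ||
+ *	    regn > dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN))
+ *		return -EINVAL;
+ */
+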
enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel)
{
return omap_current_dss_features->supported_displays[channel];
@@ -223,6 +342,11 @@ bool dss_feat_color_mode_supported(enum omap_plane plane,
color_mode;
}
+const char *dss_feat_get_clk_source_name(enum dss_clk_source id)
+{
+ return omap_current_dss_features->clksrc_names[id];
+}
+
/* DSS has_feature check */
bool dss_has_feature(enum dss_feat_id id)
{
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index b9c70be9258..c5c0b4001ab 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -22,17 +22,28 @@
#define MAX_DSS_MANAGERS 3
#define MAX_DSS_OVERLAYS 3
+#define MAX_DSS_LCD_MANAGERS 2
/* DSS has feature id */
enum dss_feat_id {
- FEAT_GLOBAL_ALPHA = 1 << 0,
- FEAT_GLOBAL_ALPHA_VID1 = 1 << 1,
- FEAT_PRE_MULT_ALPHA = 1 << 2,
- FEAT_LCDENABLEPOL = 1 << 3,
- FEAT_LCDENABLESIGNAL = 1 << 4,
- FEAT_PCKFREEENABLE = 1 << 5,
- FEAT_FUNCGATED = 1 << 6,
- FEAT_MGR_LCD2 = 1 << 7,
+ FEAT_GLOBAL_ALPHA = 1 << 0,
+ FEAT_GLOBAL_ALPHA_VID1 = 1 << 1,
+ FEAT_PRE_MULT_ALPHA = 1 << 2,
+ FEAT_LCDENABLEPOL = 1 << 3,
+ FEAT_LCDENABLESIGNAL = 1 << 4,
+ FEAT_PCKFREEENABLE = 1 << 5,
+ FEAT_FUNCGATED = 1 << 6,
+ FEAT_MGR_LCD2 = 1 << 7,
+ FEAT_LINEBUFFERSPLIT = 1 << 8,
+ FEAT_ROWREPEATENABLE = 1 << 9,
+ FEAT_RESIZECONF = 1 << 10,
+ /* Independent core clk divider */
+ FEAT_CORE_CLK_DIV = 1 << 11,
+ FEAT_LCD_CLK_SRC = 1 << 12,
+ FEAT_DSI_PLL_FREQSEL = 1 << 13,
+ FEAT_DSI_LDO_STATUS = 1 << 14,
+ FEAT_DSI_DCS_CMD_CONFIG_VC = 1 << 15,
+ FEAT_DSI_VC_OCP_WIDTH = 1 << 16,
};
/* DSS register field id */
@@ -42,15 +53,35 @@ enum dss_feat_reg_field {
FEAT_REG_FIFOHIGHTHRESHOLD,
FEAT_REG_FIFOLOWTHRESHOLD,
FEAT_REG_FIFOSIZE,
+ FEAT_REG_HORIZONTALACCU,
+ FEAT_REG_VERTICALACCU,
+ FEAT_REG_DISPC_CLK_SWITCH,
+ FEAT_REG_DSIPLL_REGN,
+ FEAT_REG_DSIPLL_REGM,
+ FEAT_REG_DSIPLL_REGM_DISPC,
+ FEAT_REG_DSIPLL_REGM_DSI,
+};
+
+enum dss_range_param {
+ FEAT_PARAM_DSS_FCK,
+ FEAT_PARAM_DSIPLL_REGN,
+ FEAT_PARAM_DSIPLL_REGM,
+ FEAT_PARAM_DSIPLL_REGM_DISPC,
+ FEAT_PARAM_DSIPLL_REGM_DSI,
+ FEAT_PARAM_DSIPLL_FINT,
+ FEAT_PARAM_DSIPLL_LPDIV,
};
/* DSS Feature Functions */
int dss_feat_get_num_mgrs(void);
int dss_feat_get_num_ovls(void);
+unsigned long dss_feat_get_param_min(enum dss_range_param param);
+unsigned long dss_feat_get_param_max(enum dss_range_param param);
enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel);
enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
bool dss_feat_color_mode_supported(enum omap_plane plane,
enum omap_color_mode color_mode);
+const char *dss_feat_get_clk_source_name(enum dss_clk_source id);
bool dss_has_feature(enum dss_feat_id id);
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
new file mode 100644
index 00000000000..0d44f070ef3
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -0,0 +1,1332 @@
+/*
+ * hdmi.c
+ *
+ * HDMI interface DSS driver for TI's OMAP4 family of processors.
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Yong Zhi
+ * Mythri pk <mythripk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "HDMI"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <plat/display.h>
+
+#include "dss.h"
+#include "hdmi.h"
+
+static struct {
+ struct mutex lock;
+ struct omap_display_platform_data *pdata;
+ struct platform_device *pdev;
+ void __iomem *base_wp; /* HDMI wrapper */
+ int code;
+ int mode;
+ u8 edid[HDMI_EDID_MAX_LENGTH];
+ u8 edid_set;
+ bool custom_set;
+ struct hdmi_config cfg;
+} hdmi;
+
+/*
+ * Logic for the structures below:
+ * the user selects a CEA or VESA timing by specifying the HDMI/DVI code.
+ * There is a correspondence between each CEA/VESA timing and its code;
+ * please refer to section 6.3 of the HDMI 1.3 specification for the
+ * timing codes.
+ *
+ * cea_vesa_timings holds all the CEA and VESA timings supported on OMAP4.
+ * code_cea maps a CEA code to an index into cea_vesa_timings, and
+ * code_vesa does the same for VESA codes. code_index is the reverse
+ * mapping: once the EDID is read from the TV and parsed into timing
+ * values, it is used to map the matched entry back to the corresponding
+ * CEA or VESA code. A worked example of the round trip follows the
+ * code_vesa table below.
+ */
+
+static const struct hdmi_timings cea_vesa_timings[OMAP_HDMI_TIMINGS_NB] = {
+ { {640, 480, 25200, 96, 16, 48, 2, 10, 33} , 0 , 0},
+ { {1280, 720, 74250, 40, 440, 220, 5, 5, 20}, 1, 1},
+ { {1280, 720, 74250, 40, 110, 220, 5, 5, 20}, 1, 1},
+ { {720, 480, 27027, 62, 16, 60, 6, 9, 30}, 0, 0},
+ { {2880, 576, 108000, 256, 48, 272, 5, 5, 39}, 0, 0},
+ { {1440, 240, 27027, 124, 38, 114, 3, 4, 15}, 0, 0},
+ { {1440, 288, 27000, 126, 24, 138, 3, 2, 19}, 0, 0},
+ { {1920, 540, 74250, 44, 528, 148, 5, 2, 15}, 1, 1},
+ { {1920, 540, 74250, 44, 88, 148, 5, 2, 15}, 1, 1},
+ { {1920, 1080, 148500, 44, 88, 148, 5, 4, 36}, 1, 1},
+ { {720, 576, 27000, 64, 12, 68, 5, 5, 39}, 0, 0},
+ { {1440, 576, 54000, 128, 24, 136, 5, 5, 39}, 0, 0},
+ { {1920, 1080, 148500, 44, 528, 148, 5, 4, 36}, 1, 1},
+ { {2880, 480, 108108, 248, 64, 240, 6, 9, 30}, 0, 0},
+ { {1920, 1080, 74250, 44, 638, 148, 5, 4, 36}, 1, 1},
+ /* VESA From Here */
+ { {640, 480, 25175, 96, 16, 48, 2 , 11, 31}, 0, 0},
+ { {800, 600, 40000, 128, 40, 88, 4 , 1, 23}, 1, 1},
+ { {848, 480, 33750, 112, 16, 112, 8 , 6, 23}, 1, 1},
+ { {1280, 768, 79500, 128, 64, 192, 7 , 3, 20}, 1, 0},
+ { {1280, 800, 83500, 128, 72, 200, 6 , 3, 22}, 1, 0},
+ { {1360, 768, 85500, 112, 64, 256, 6 , 3, 18}, 1, 1},
+ { {1280, 960, 108000, 112, 96, 312, 3 , 1, 36}, 1, 1},
+ { {1280, 1024, 108000, 112, 48, 248, 3 , 1, 38}, 1, 1},
+ { {1024, 768, 65000, 136, 24, 160, 6, 3, 29}, 0, 0},
+ { {1400, 1050, 121750, 144, 88, 232, 4, 3, 32}, 1, 0},
+ { {1440, 900, 106500, 152, 80, 232, 6, 3, 25}, 1, 0},
+ { {1680, 1050, 146250, 176 , 104, 280, 6, 3, 30}, 1, 0},
+ { {1366, 768, 85500, 143, 70, 213, 3, 3, 24}, 1, 1},
+ { {1920, 1080, 148500, 44, 148, 80, 5, 4, 36}, 1, 1},
+ { {1280, 768, 68250, 32, 48, 80, 7, 3, 12}, 0, 1},
+ { {1400, 1050, 101000, 32, 48, 80, 4, 3, 23}, 0, 1},
+ { {1680, 1050, 119000, 32, 48, 80, 6, 3, 21}, 0, 1},
+ { {1280, 800, 79500, 32, 48, 80, 6, 3, 14}, 0, 1},
+ { {1280, 720, 74250, 40, 110, 220, 5, 5, 20}, 1, 1}
+};
+
+/*
+ * Static mapping array which maps each entry in cea_vesa_timings
+ * to its corresponding CEA / VESA code.
+ */
+static const int code_index[OMAP_HDMI_TIMINGS_NB] = {
+ 1, 19, 4, 2, 37, 6, 21, 20, 5, 16, 17, 29, 31, 35, 32,
+ /* <--15 CEA 17--> vesa*/
+ 4, 9, 0xE, 0x17, 0x1C, 0x27, 0x20, 0x23, 0x10, 0x2A,
+ 0X2F, 0x3A, 0X51, 0X52, 0x16, 0x29, 0x39, 0x1B
+};
+
+/*
+ * Reverse static mappings which map a CEA / VESA code back to the
+ * corresponding index in cea_vesa_timings.
+ */
+static const int code_cea[39] = {
+ -1, 0, 3, 3, 2, 8, 5, 5, -1, -1,
+ -1, -1, -1, -1, -1, -1, 9, 10, 10, 1,
+ 7, 6, 6, -1, -1, -1, -1, -1, -1, 11,
+ 11, 12, 14, -1, -1, 13, 13, 4, 4
+};
+
+static const int code_vesa[85] = {
+ -1, -1, -1, -1, 15, -1, -1, -1, -1, 16,
+ -1, -1, -1, -1, 17, -1, 23, -1, -1, -1,
+ -1, -1, 29, 18, -1, -1, -1, 32, 19, -1,
+ -1, -1, 21, -1, -1, 22, -1, -1, -1, 20,
+ -1, 30, 24, -1, -1, -1, -1, 25, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 31, 26, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 27, 28, -1, 33};
+
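+/*
+ * Worked example (illustrative): CEA code 16 (1080p) maps through
+ * code_cea[16] == 9 to cea_vesa_timings[9], the 1920 x 1080 / 148500
+ * entry, and code_index[9] == 16 maps that entry back to CEA code 16.
+ */
+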
+static const u8 edid_header[8] = {0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0};
+
+static inline void hdmi_write_reg(const struct hdmi_reg idx, u32 val)
+{
+ __raw_writel(val, hdmi.base_wp + idx.idx);
+}
+
+static inline u32 hdmi_read_reg(const struct hdmi_reg idx)
+{
+ return __raw_readl(hdmi.base_wp + idx.idx);
+}
+
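+/*
+ * Poll bits [b2:b1] of the given register until they read back 'val',
+ * waiting up to roughly 10 ms. Returns 'val' on success and its logical
+ * negation on timeout, so callers compare the result against 'val'.
+ */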
+static inline int hdmi_wait_for_bit_change(const struct hdmi_reg idx,
+ int b2, int b1, u32 val)
+{
+ u32 t = 0;
+ while (val != REG_GET(idx, b2, b1)) {
+ udelay(1);
+ if (t++ > 10000)
+ return !val;
+ }
+ return val;
+}
+
+int hdmi_init_display(struct omap_dss_device *dssdev)
+{
+ DSSDBG("init_display\n");
+
+ return 0;
+}
+
+static int hdmi_pll_init(enum hdmi_clk_refsel refsel, int dcofreq,
+ struct hdmi_pll_info *fmt, u16 sd)
+{
+ u32 r;
+
+ /* PLL start always use manual mode */
+ REG_FLD_MOD(PLLCTRL_PLL_CONTROL, 0x0, 0, 0);
+
+ r = hdmi_read_reg(PLLCTRL_CFG1);
+ r = FLD_MOD(r, fmt->regm, 20, 9); /* CFG1_PLL_REGM */
+ r = FLD_MOD(r, fmt->regn, 8, 1); /* CFG1_PLL_REGN */
+
+ hdmi_write_reg(PLLCTRL_CFG1, r);
+
+ r = hdmi_read_reg(PLLCTRL_CFG2);
+
+ r = FLD_MOD(r, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
+ r = FLD_MOD(r, 0x1, 13, 13); /* PLL_REFEN */
+ r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
+
+ if (dcofreq) {
+ /* divider programming for frequencies beyond 1000 MHz */
+ REG_FLD_MOD(PLLCTRL_CFG3, sd, 17, 10);
+ r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
+ } else {
+ r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
+ }
+
+ hdmi_write_reg(PLLCTRL_CFG2, r);
+
+ r = hdmi_read_reg(PLLCTRL_CFG4);
+ r = FLD_MOD(r, fmt->regm2, 24, 18);
+ r = FLD_MOD(r, fmt->regmf, 17, 0);
+
+ hdmi_write_reg(PLLCTRL_CFG4, r);
+
+ /* go now */
+ REG_FLD_MOD(PLLCTRL_PLL_GO, 0x1, 0, 0);
+
+ /* wait for bit change */
+ if (hdmi_wait_for_bit_change(PLLCTRL_PLL_GO, 0, 0, 1) != 1) {
+ DSSERR("PLL GO bit not set\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Wait till the lock bit is set in PLL status */
+ if (hdmi_wait_for_bit_change(PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
+ DSSWARN("cannot lock PLL\n");
+ DSSWARN("CFG1 0x%x\n",
+ hdmi_read_reg(PLLCTRL_CFG1));
+ DSSWARN("CFG2 0x%x\n",
+ hdmi_read_reg(PLLCTRL_CFG2));
+ DSSWARN("CFG4 0x%x\n",
+ hdmi_read_reg(PLLCTRL_CFG4));
+ return -ETIMEDOUT;
+ }
+
+ DSSDBG("PLL locked!\n");
+
+ return 0;
+}
+
+/* PHY_PWR_CMD */
+static int hdmi_set_phy_pwr(enum hdmi_phy_pwr val)
+{
+ /* Command for power control of HDMI PHY */
+ REG_FLD_MOD(HDMI_WP_PWR_CTRL, val, 7, 6);
+
+ /* Status of the power control of HDMI PHY */
+ if (hdmi_wait_for_bit_change(HDMI_WP_PWR_CTRL, 5, 4, val) != val) {
+ DSSERR("Failed to set PHY power mode to %d\n", val);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/* PLL_PWR_CMD */
+static int hdmi_set_pll_pwr(enum hdmi_pll_pwr val)
+{
+ /* Command for power control of HDMI PLL */
+ REG_FLD_MOD(HDMI_WP_PWR_CTRL, val, 3, 2);
+
+ /* wait till PLL_PWR_STATUS is set */
+ if (hdmi_wait_for_bit_change(HDMI_WP_PWR_CTRL, 1, 0, val) != val) {
+ DSSERR("Failed to set PLL_PWR_STATUS\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int hdmi_pll_reset(void)
+{
+ /* SYSRESET controlled by power FSM */
+ REG_FLD_MOD(PLLCTRL_PLL_CONTROL, 0x0, 3, 3);
+
+ /* the status bit reads 0 while the reset is still in progress */
+ if (hdmi_wait_for_bit_change(PLLCTRL_PLL_STATUS, 0, 0, 1) != 1) {
+ DSSERR("Failed to sysreset PLL\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int hdmi_phy_init(void)
+{
+ int r;
+
+ r = hdmi_set_phy_pwr(HDMI_PHYPWRCMD_LDOON);
+ if (r)
+ return r;
+
+ r = hdmi_set_phy_pwr(HDMI_PHYPWRCMD_TXON);
+ if (r)
+ return r;
+
+ /*
+ * Read address 0 to ensure the SCP reset has completed:
+ * this is a dummy access performed only to confirm the reset is done.
+ */
+ hdmi_read_reg(HDMI_TXPHY_TX_CTRL);
+
+ /*
+ * Write to PHY address 0 to configure the clock:
+ * use HFBITCLK by writing the HDMI_TXPHY_TX_CONTROL_FREQOUT field.
+ */
+ REG_FLD_MOD(HDMI_TXPHY_TX_CTRL, 0x1, 31, 30);
+
+ /* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */
+ hdmi_write_reg(HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
+
+ /* Setup max LDO voltage */
+ REG_FLD_MOD(HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
+
+ /* Write to phy address 3 to change the polarity control */
+ REG_FLD_MOD(HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
+
+ return 0;
+}
+
+static int hdmi_wait_softreset(void)
+{
+ /* trigger a soft reset (write 1 to the SOFTRESET bit) */
+ REG_FLD_MOD(HDMI_WP_SYSCONFIG, 0x1, 0, 0);
+
+ /* wait till SOFTRESET == 0 */
+ if (hdmi_wait_for_bit_change(HDMI_WP_SYSCONFIG, 0, 0, 0) != 0) {
+ DSSERR("sysconfig reset failed\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int hdmi_pll_program(struct hdmi_pll_info *fmt)
+{
+ int r;
+ enum hdmi_clk_refsel refsel;
+
+ /* wait for wrapper reset */
+ r = hdmi_wait_softreset();
+ if (r)
+ return r;
+
+ r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
+ if (r)
+ return r;
+
+ r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
+ if (r)
+ return r;
+
+ r = hdmi_pll_reset();
+ if (r)
+ return r;
+
+ refsel = HDMI_REFSEL_SYSCLK;
+
+ r = hdmi_pll_init(refsel, fmt->dcofreq, fmt, fmt->regsd);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void hdmi_phy_off(void)
+{
+ hdmi_set_phy_pwr(HDMI_PHYPWRCMD_OFF);
+}
+
+static int hdmi_core_ddc_edid(u8 *pedid, int ext)
+{
+ u32 i, j;
+ char checksum = 0;
+ u32 offset = 0;
+
+ /* Turn on CLK for DDC */
+ REG_FLD_MOD(HDMI_CORE_AV_DPD, 0x7, 2, 0);
+
+ /*
+ * SW HACK: without this delay the DDC (I2C bus) reads zeroes or
+ * right-shifted values. The behaviour is not consistent and is seen
+ * only with some TVs.
+ */
+ usleep_range(800, 1000);
+
+ if (!ext) {
+ /* Clk SCL Devices */
+ REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0xA, 3, 0);
+
+ /* HDMI_CORE_DDC_STATUS_IN_PROG */
+ if (hdmi_wait_for_bit_change(HDMI_CORE_DDC_STATUS,
+ 4, 4, 0) != 0) {
+ DSSERR("Failed to program DDC\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Clear FIFO */
+ REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0x9, 3, 0);
+
+ /* HDMI_CORE_DDC_STATUS_IN_PROG */
+ if (hdmi_wait_for_bit_change(HDMI_CORE_DDC_STATUS,
+ 4, 4, 0) != 0) {
+ DSSERR("Failed to program DDC\n");
+ return -ETIMEDOUT;
+ }
+
+ } else {
+ if (ext % 2 != 0)
+ offset = 0x80;
+ }
+
+ /* Load Segment Address Register */
+ REG_FLD_MOD(HDMI_CORE_DDC_SEGM, ext/2, 7, 0);
+
+ /* Load Slave Address Register */
+ REG_FLD_MOD(HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
+
+ /* Load Offset Address Register */
+ REG_FLD_MOD(HDMI_CORE_DDC_OFFSET, offset, 7, 0);
+
+ /* Load Byte Count */
+ REG_FLD_MOD(HDMI_CORE_DDC_COUNT1, 0x80, 7, 0);
+ REG_FLD_MOD(HDMI_CORE_DDC_COUNT2, 0x0, 1, 0);
+
+ /* Set DDC_CMD */
+ if (ext)
+ REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0x4, 3, 0);
+ else
+ REG_FLD_MOD(HDMI_CORE_DDC_CMD, 0x2, 3, 0);
+
+ /* HDMI_CORE_DDC_STATUS_BUS_LOW */
+ if (REG_GET(HDMI_CORE_DDC_STATUS, 6, 6) == 1) {
+ DSSWARN("I2C Bus Low?\n");
+ return -EIO;
+ }
+ /* HDMI_CORE_DDC_STATUS_NO_ACK */
+ if (REG_GET(HDMI_CORE_DDC_STATUS, 5, 5) == 1) {
+ DSSWARN("I2C No Ack\n");
+ return -EIO;
+ }
+
+ i = ext * 128;
+ j = 0;
+ while (((REG_GET(HDMI_CORE_DDC_STATUS, 4, 4) == 1) ||
+ (REG_GET(HDMI_CORE_DDC_STATUS, 2, 2) == 0)) &&
+ j < 128) {
+
+ if (REG_GET(HDMI_CORE_DDC_STATUS, 2, 2) == 0) {
+ /* FIFO not empty */
+ pedid[i++] = REG_GET(HDMI_CORE_DDC_DATA, 7, 0);
+ j++;
+ }
+ }
+
+ for (j = 0; j < 128; j++)
+ checksum += pedid[j];
+
+ if (checksum != 0) {
+ DSSERR("E-EDID checksum failed!!\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int read_edid(u8 *pedid, u16 max_length)
+{
+ int r = 0, n = 0, i = 0;
+ int max_ext_blocks = (max_length / 128) - 1;
+
+ r = hdmi_core_ddc_edid(pedid, 0);
+ if (r) {
+ return r;
+ } else {
+ n = pedid[0x7e];
+
+ /*
+ * README: we need to comply with the max_length set by the caller.
+ * A better implementation would allocate the memory needed to
+ * store the EDID according to the extension block count found in
+ * the first block.
+ */
+ if (n > max_ext_blocks)
+ n = max_ext_blocks;
+
+ for (i = 1; i <= n; i++) {
+ r = hdmi_core_ddc_edid(pedid, i);
+ if (r)
+ return r;
+ }
+ }
+ return 0;
+}
+
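+/*
+ * Example (illustrative): for a 256 byte EDID with one extension block,
+ * read_edid() first fetches block 0 with hdmi_core_ddc_edid(pedid, 0),
+ * finds pedid[0x7e] == 1, and then fetches block 1 with
+ * hdmi_core_ddc_edid(pedid, 1), which uses DDC segment 0 and offset 0x80
+ * to read the upper 128 bytes.
+ */
+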
+static int get_timings_index(void)
+{
+ int code;
+
+ if (hdmi.mode == 0)
+ code = code_vesa[hdmi.code];
+ else
+ code = code_cea[hdmi.code];
+
+ if (code == -1) {
+ /* HDMI code 4 corresponds to 640 x 480 VGA */
+ hdmi.code = 4;
+ /* default to DVI mode */
+ hdmi.mode = HDMI_DVI;
+
+ code = code_vesa[hdmi.code];
+ }
+ return code;
+}
+
+static struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing)
+{
+ int i = 0, code = -1, temp_vsync = 0, temp_hsync = 0;
+ int timing_vsync = 0, timing_hsync = 0;
+ struct omap_video_timings temp;
+ struct hdmi_cm cm = {-1};
+ DSSDBG("hdmi_get_code\n");
+
+ for (i = 0; i < OMAP_HDMI_TIMINGS_NB; i++) {
+ temp = cea_vesa_timings[i].timings;
+ if ((temp.pixel_clock == timing->pixel_clock) &&
+ (temp.x_res == timing->x_res) &&
+ (temp.y_res == timing->y_res)) {
+
+ temp_hsync = temp.hfp + temp.hsw + temp.hbp;
+ timing_hsync = timing->hfp + timing->hsw + timing->hbp;
+ temp_vsync = temp.vfp + temp.vsw + temp.vbp;
+ timing_vsync = timing->vfp + timing->vsw + timing->vbp;
+
+ DSSDBG("temp_hsync = %d, temp_vsync = %d, "
+ "timing_hsync = %d, timing_vsync = %d\n",
+ temp_hsync, temp_vsync,
+ timing_hsync, timing_vsync);
+
+ if ((temp_hsync == timing_hsync) &&
+ (temp_vsync == timing_vsync)) {
+ code = i;
+ cm.code = code_index[i];
+ if (code < 14)
+ cm.mode = HDMI_HDMI;
+ else
+ cm.mode = HDMI_DVI;
+ DSSDBG("Hdmi_code = %d mode = %d\n",
+ cm.code, cm.mode);
+ break;
+ }
+ }
+ }
+
+ return cm;
+}
+
+static void get_horz_vert_timing_info(int current_descriptor_addrs, u8 *edid ,
+ struct omap_video_timings *timings)
+{
+ /* X and Y resolution */
+ timings->x_res = (((edid[current_descriptor_addrs + 4] & 0xF0) << 4) |
+ edid[current_descriptor_addrs + 2]);
+ timings->y_res = (((edid[current_descriptor_addrs + 7] & 0xF0) << 4) |
+ edid[current_descriptor_addrs + 5]);
+
+ timings->pixel_clock = ((edid[current_descriptor_addrs + 1] << 8) |
+ edid[current_descriptor_addrs]);
+
+ timings->pixel_clock = 10 * timings->pixel_clock;
+
+ /* HORIZONTAL FRONT PORCH */
+ timings->hfp = edid[current_descriptor_addrs + 8] |
+ ((edid[current_descriptor_addrs + 11] & 0xc0) << 2);
+ /* HORIZONTAL SYNC WIDTH */
+ timings->hsw = edid[current_descriptor_addrs + 9] |
+ ((edid[current_descriptor_addrs + 11] & 0x30) << 4);
+ /* HORIZONTAL BACK PORCH */
+ timings->hbp = (((edid[current_descriptor_addrs + 4] & 0x0F) << 8) |
+ edid[current_descriptor_addrs + 3]) -
+ (timings->hfp + timings->hsw);
+ /* VERTICAL FRONT PORCH */
+ timings->vfp = ((edid[current_descriptor_addrs + 10] & 0xF0) >> 4) |
+ ((edid[current_descriptor_addrs + 11] & 0x0f) << 2);
+ /* VERTICAL SYNC WIDTH */
+ timings->vsw = (edid[current_descriptor_addrs + 10] & 0x0F) |
+ ((edid[current_descriptor_addrs + 11] & 0x03) << 4);
+ /* VERTICAL BACK PORCH */
+ timings->vbp = (((edid[current_descriptor_addrs + 7] & 0x0F) << 8) |
+ edid[current_descriptor_addrs + 6]) -
+ (timings->vfp + timings->vsw);
+}
+
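+/*
+ * Example (illustrative): a descriptor whose first two bytes are
+ * 0x02 0x3A stores the value 0x3A02 == 14850, which the code above
+ * scales by 10 to a 148500 kHz (148.5 MHz) pixel clock, matching the
+ * 1920 x 1080 entries in cea_vesa_timings.
+ */
+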
+/* Get the resolution and timing information from the EDID */
+static void get_edid_timing_data(u8 *edid)
+{
+ u8 count;
+ u16 current_descriptor_addrs;
+ struct hdmi_cm cm;
+ struct omap_video_timings edid_timings;
+
+ /* search block 0; there are 4 DTDs arranged in priority order */
+ for (count = 0; count < EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR; count++) {
+ current_descriptor_addrs =
+ EDID_DESCRIPTOR_BLOCK0_ADDRESS +
+ count * EDID_TIMING_DESCRIPTOR_SIZE;
+ get_horz_vert_timing_info(current_descriptor_addrs,
+ edid, &edid_timings);
+ cm = hdmi_get_code(&edid_timings);
+ DSSDBG("Block0[%d] value matches code = %d , mode = %d\n",
+ count, cm.code, cm.mode);
+ if (cm.code == -1) {
+ continue;
+ } else {
+ hdmi.code = cm.code;
+ hdmi.mode = cm.mode;
+ DSSDBG("code = %d , mode = %d\n",
+ hdmi.code, hdmi.mode);
+ return;
+ }
+ }
+ if (edid[0x7e] != 0x00) {
+ for (count = 0; count < EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR;
+ count++) {
+ current_descriptor_addrs =
+ EDID_DESCRIPTOR_BLOCK1_ADDRESS +
+ count * EDID_TIMING_DESCRIPTOR_SIZE;
+ get_horz_vert_timing_info(current_descriptor_addrs,
+ edid, &edid_timings);
+ cm = hdmi_get_code(&edid_timings);
+ DSSDBG("Block1[%d] value matches code = %d, mode = %d\n",
+ count, cm.code, cm.mode);
+ if (cm.code == -1) {
+ continue;
+ } else {
+ hdmi.code = cm.code;
+ hdmi.mode = cm.mode;
+ DSSDBG("code = %d , mode = %d\n",
+ hdmi.code, hdmi.mode);
+ return;
+ }
+ }
+ }
+
+ DSSINFO("no valid timing found, falling back to VGA\n");
+ hdmi.code = 4; /* default to 640 x 480 VGA */
+ hdmi.mode = HDMI_DVI;
+}
+
+static void hdmi_read_edid(struct omap_video_timings *dp)
+{
+ int ret = 0, code;
+
+ memset(hdmi.edid, 0, HDMI_EDID_MAX_LENGTH);
+
+ if (!hdmi.edid_set)
+ ret = read_edid(hdmi.edid, HDMI_EDID_MAX_LENGTH);
+
+ if (!ret) {
+ if (!memcmp(hdmi.edid, edid_header, sizeof(edid_header))) {
+ /* search for timings of default resolution */
+ get_edid_timing_data(hdmi.edid);
+ hdmi.edid_set = true;
+ }
+ } else {
+ DSSWARN("failed to read E-EDID\n");
+ }
+
+ if (!hdmi.edid_set) {
+ DSSINFO("falling back to VGA\n");
+ hdmi.code = 4; /* default to 640 x 480 VGA */
+ hdmi.mode = HDMI_DVI;
+ }
+
+ code = get_timings_index();
+
+ *dp = cea_vesa_timings[code].timings;
+}
+
+static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
+ struct hdmi_core_infoframe_avi *avi_cfg,
+ struct hdmi_core_packet_enable_repeat *repeat_cfg)
+{
+ DSSDBG("Enter hdmi_core_init\n");
+
+ /* video core */
+ video_cfg->ip_bus_width = HDMI_INPUT_8BIT;
+ video_cfg->op_dither_truc = HDMI_OUTPUTTRUNCATION_8BIT;
+ video_cfg->deep_color_pkt = HDMI_DEEPCOLORPACKECTDISABLE;
+ video_cfg->pkt_mode = HDMI_PACKETMODERESERVEDVALUE;
+ video_cfg->hdmi_dvi = HDMI_DVI;
+ video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK;
+
+ /* info frame */
+ avi_cfg->db1_format = 0;
+ avi_cfg->db1_active_info = 0;
+ avi_cfg->db1_bar_info_dv = 0;
+ avi_cfg->db1_scan_info = 0;
+ avi_cfg->db2_colorimetry = 0;
+ avi_cfg->db2_aspect_ratio = 0;
+ avi_cfg->db2_active_fmt_ar = 0;
+ avi_cfg->db3_itc = 0;
+ avi_cfg->db3_ec = 0;
+ avi_cfg->db3_q_range = 0;
+ avi_cfg->db3_nup_scaling = 0;
+ avi_cfg->db4_videocode = 0;
+ avi_cfg->db5_pixel_repeat = 0;
+ avi_cfg->db6_7_line_eoftop = 0;
+ avi_cfg->db8_9_line_sofbottom = 0;
+ avi_cfg->db10_11_pixel_eofleft = 0;
+ avi_cfg->db12_13_pixel_sofright = 0;
+
+ /* packet enable and repeat */
+ repeat_cfg->audio_pkt = 0;
+ repeat_cfg->audio_pkt_repeat = 0;
+ repeat_cfg->avi_infoframe = 0;
+ repeat_cfg->avi_infoframe_repeat = 0;
+ repeat_cfg->gen_cntrl_pkt = 0;
+ repeat_cfg->gen_cntrl_pkt_repeat = 0;
+ repeat_cfg->generic_pkt = 0;
+ repeat_cfg->generic_pkt_repeat = 0;
+}
+
+static void hdmi_core_powerdown_disable(void)
+{
+ DSSDBG("Enter hdmi_core_powerdown_disable\n");
+ REG_FLD_MOD(HDMI_CORE_CTRL1, 0x0, 0, 0);
+}
+
+static void hdmi_core_swreset_release(void)
+{
+ DSSDBG("Enter hdmi_core_swreset_release\n");
+ REG_FLD_MOD(HDMI_CORE_SYS_SRST, 0x0, 0, 0);
+}
+
+static void hdmi_core_swreset_assert(void)
+{
+ DSSDBG("Enter hdmi_core_swreset_assert\n");
+ REG_FLD_MOD(HDMI_CORE_SYS_SRST, 0x1, 0, 0);
+}
+
+/* DSS_HDMI_CORE_VIDEO_CONFIG */
+static void hdmi_core_video_config(struct hdmi_core_video_config *cfg)
+{
+ u32 r = 0;
+
+ /* sys_ctrl1 default configuration not tunable */
+ r = hdmi_read_reg(HDMI_CORE_CTRL1);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC, 5, 5);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC, 4, 4);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_BSEL_24BITBUS, 2, 2);
+ r = FLD_MOD(r, HDMI_CORE_CTRL1_EDGE_RISINGEDGE, 1, 1);
+ hdmi_write_reg(HDMI_CORE_CTRL1, r);
+
+ REG_FLD_MOD(HDMI_CORE_SYS_VID_ACEN, cfg->ip_bus_width, 7, 6);
+
+ /* Vid_Mode */
+ r = hdmi_read_reg(HDMI_CORE_SYS_VID_MODE);
+
+ /* dither truncation configuration */
+ if (cfg->op_dither_truc > HDMI_OUTPUTTRUNCATION_12BIT) {
+ r = FLD_MOD(r, cfg->op_dither_truc - 3, 7, 6);
+ r = FLD_MOD(r, 1, 5, 5);
+ } else {
+ r = FLD_MOD(r, cfg->op_dither_truc, 7, 6);
+ r = FLD_MOD(r, 0, 5, 5);
+ }
+ hdmi_write_reg(HDMI_CORE_SYS_VID_MODE, r);
+
+ /* HDMI_Ctrl */
+ r = hdmi_read_reg(HDMI_CORE_AV_HDMI_CTRL);
+ r = FLD_MOD(r, cfg->deep_color_pkt, 6, 6);
+ r = FLD_MOD(r, cfg->pkt_mode, 5, 3);
+ r = FLD_MOD(r, cfg->hdmi_dvi, 0, 0);
+ hdmi_write_reg(HDMI_CORE_AV_HDMI_CTRL, r);
+
+ /* TMDS_CTRL */
+ REG_FLD_MOD(HDMI_CORE_SYS_TMDS_CTRL,
+ cfg->tclk_sel_clkmult, 6, 5);
+}
+
+static void hdmi_core_aux_infoframe_avi_config(
+ struct hdmi_core_infoframe_avi info_avi)
+{
+ u32 val;
+ char sum = 0, checksum = 0;
+
+ sum += 0x82 + 0x002 + 0x00D;
+ hdmi_write_reg(HDMI_CORE_AV_AVI_TYPE, 0x082);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_VERS, 0x002);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_LEN, 0x00D);
+
+ val = (info_avi.db1_format << 5) |
+ (info_avi.db1_active_info << 4) |
+ (info_avi.db1_bar_info_dv << 2) |
+ (info_avi.db1_scan_info);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(0), val);
+ sum += val;
+
+ val = (info_avi.db2_colorimetry << 6) |
+ (info_avi.db2_aspect_ratio << 4) |
+ (info_avi.db2_active_fmt_ar);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(1), val);
+ sum += val;
+
+ val = (info_avi.db3_itc << 7) |
+ (info_avi.db3_ec << 4) |
+ (info_avi.db3_q_range << 2) |
+ (info_avi.db3_nup_scaling);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(2), val);
+ sum += val;
+
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(3), info_avi.db4_videocode);
+ sum += info_avi.db4_videocode;
+
+ val = info_avi.db5_pixel_repeat;
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(4), val);
+ sum += val;
+
+ val = info_avi.db6_7_line_eoftop & 0x00FF;
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(5), val);
+ sum += val;
+
+ val = ((info_avi.db6_7_line_eoftop >> 8) & 0x00FF);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(6), val);
+ sum += val;
+
+ val = info_avi.db8_9_line_sofbottom & 0x00FF;
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(7), val);
+ sum += val;
+
+ val = ((info_avi.db8_9_line_sofbottom >> 8) & 0x00FF);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(8), val);
+ sum += val;
+
+ val = info_avi.db10_11_pixel_eofleft & 0x00FF;
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(9), val);
+ sum += val;
+
+ val = ((info_avi.db10_11_pixel_eofleft >> 8) & 0x00FF);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(10), val);
+ sum += val;
+
+ val = info_avi.db12_13_pixel_sofright & 0x00FF;
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(11), val);
+ sum += val;
+
+ val = ((info_avi.db12_13_pixel_sofright >> 8) & 0x00FF);
+ hdmi_write_reg(HDMI_CORE_AV_AVI_DBYTE(12), val);
+ sum += val;
+
+ checksum = 0x100 - sum;
+ hdmi_write_reg(HDMI_CORE_AV_AVI_CHSUM, checksum);
+}
+
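+/*
+ * Note on the checksum above (illustrative): the AVI InfoFrame checksum
+ * is chosen so that the header bytes (0x82 + 0x02 + 0x0D), the thirteen
+ * data bytes and the checksum itself sum to zero modulo 256, which is
+ * exactly what checksum = 0x100 - sum achieves.
+ */
+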
+static void hdmi_core_av_packet_config(
+ struct hdmi_core_packet_enable_repeat repeat_cfg)
+{
+ /* enable/repeat the infoframe */
+ hdmi_write_reg(HDMI_CORE_AV_PB_CTRL1,
+ (repeat_cfg.audio_pkt << 5) |
+ (repeat_cfg.audio_pkt_repeat << 4) |
+ (repeat_cfg.avi_infoframe << 1) |
+ (repeat_cfg.avi_infoframe_repeat));
+
+ /* enable/repeat the packet */
+ hdmi_write_reg(HDMI_CORE_AV_PB_CTRL2,
+ (repeat_cfg.gen_cntrl_pkt << 3) |
+ (repeat_cfg.gen_cntrl_pkt_repeat << 2) |
+ (repeat_cfg.generic_pkt << 1) |
+ (repeat_cfg.generic_pkt_repeat));
+}
+
+static void hdmi_wp_init(struct omap_video_timings *timings,
+ struct hdmi_video_format *video_fmt,
+ struct hdmi_video_interface *video_int)
+{
+ DSSDBG("Enter hdmi_wp_init\n");
+
+ timings->hbp = 0;
+ timings->hfp = 0;
+ timings->hsw = 0;
+ timings->vbp = 0;
+ timings->vfp = 0;
+ timings->vsw = 0;
+
+ video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
+ video_fmt->y_res = 0;
+ video_fmt->x_res = 0;
+
+ video_int->vsp = 0;
+ video_int->hsp = 0;
+
+ video_int->interlacing = 0;
+ video_int->tm = 0; /* HDMI_TIMING_SLAVE */
+}
+
+static void hdmi_wp_video_start(bool start)
+{
+ REG_FLD_MOD(HDMI_WP_VIDEO_CFG, start, 31, 31);
+}
+
+static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
+ struct omap_video_timings *timings, struct hdmi_config *param)
+{
+ DSSDBG("Enter hdmi_wp_video_init_format\n");
+
+ video_fmt->y_res = param->timings.timings.y_res;
+ video_fmt->x_res = param->timings.timings.x_res;
+
+ timings->hbp = param->timings.timings.hbp;
+ timings->hfp = param->timings.timings.hfp;
+ timings->hsw = param->timings.timings.hsw;
+ timings->vbp = param->timings.timings.vbp;
+ timings->vfp = param->timings.timings.vfp;
+ timings->vsw = param->timings.timings.vsw;
+}
+
+static void hdmi_wp_video_config_format(
+ struct hdmi_video_format *video_fmt)
+{
+ u32 l = 0;
+
+ REG_FLD_MOD(HDMI_WP_VIDEO_CFG, video_fmt->packing_mode, 10, 8);
+
+ l |= FLD_VAL(video_fmt->y_res, 31, 16);
+ l |= FLD_VAL(video_fmt->x_res, 15, 0);
+ hdmi_write_reg(HDMI_WP_VIDEO_SIZE, l);
+}
+
+static void hdmi_wp_video_config_interface(
+ struct hdmi_video_interface *video_int)
+{
+ u32 r;
+ DSSDBG("Enter hdmi_wp_video_config_interface\n");
+
+ r = hdmi_read_reg(HDMI_WP_VIDEO_CFG);
+ r = FLD_MOD(r, video_int->vsp, 7, 7);
+ r = FLD_MOD(r, video_int->hsp, 6, 6);
+ r = FLD_MOD(r, video_int->interlacing, 3, 3);
+ r = FLD_MOD(r, video_int->tm, 1, 0);
+ hdmi_write_reg(HDMI_WP_VIDEO_CFG, r);
+}
+
+static void hdmi_wp_video_config_timing(
+ struct omap_video_timings *timings)
+{
+ u32 timing_h = 0;
+ u32 timing_v = 0;
+
+ DSSDBG("Enter hdmi_wp_video_config_timing\n");
+
+ timing_h |= FLD_VAL(timings->hbp, 31, 20);
+ timing_h |= FLD_VAL(timings->hfp, 19, 8);
+ timing_h |= FLD_VAL(timings->hsw, 7, 0);
+ hdmi_write_reg(HDMI_WP_VIDEO_TIMING_H, timing_h);
+
+ timing_v |= FLD_VAL(timings->vbp, 31, 20);
+ timing_v |= FLD_VAL(timings->vfp, 19, 8);
+ timing_v |= FLD_VAL(timings->vsw, 7, 0);
+ hdmi_write_reg(HDMI_WP_VIDEO_TIMING_V, timing_v);
+}
+
+static void hdmi_basic_configure(struct hdmi_config *cfg)
+{
+ /* HDMI */
+ struct omap_video_timings video_timing;
+ struct hdmi_video_format video_format;
+ struct hdmi_video_interface video_interface;
+ /* HDMI core */
+ struct hdmi_core_infoframe_avi avi_cfg;
+ struct hdmi_core_video_config v_core_cfg;
+ struct hdmi_core_packet_enable_repeat repeat_cfg;
+
+ hdmi_wp_init(&video_timing, &video_format,
+ &video_interface);
+
+ hdmi_core_init(&v_core_cfg,
+ &avi_cfg,
+ &repeat_cfg);
+
+ hdmi_wp_video_init_format(&video_format,
+ &video_timing, cfg);
+
+ hdmi_wp_video_config_timing(&video_timing);
+
+ /* video config */
+ video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
+
+ hdmi_wp_video_config_format(&video_format);
+
+ video_interface.vsp = cfg->timings.vsync_pol;
+ video_interface.hsp = cfg->timings.hsync_pol;
+ video_interface.interlacing = cfg->interlace;
+ video_interface.tm = 1; /* HDMI_TIMING_MASTER_24BIT */
+
+ hdmi_wp_video_config_interface(&video_interface);
+
+ /*
+ * configure core video part
+ * set software reset in the core
+ */
+ hdmi_core_swreset_assert();
+
+ /* power down off */
+ hdmi_core_powerdown_disable();
+
+ v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
+ v_core_cfg.hdmi_dvi = cfg->cm.mode;
+
+ hdmi_core_video_config(&v_core_cfg);
+
+ /* release software reset in the core */
+ hdmi_core_swreset_release();
+
+ /*
+ * Configure the AVI InfoFrame.
+ * See the CEA-861-D specification, page 65, for details.
+ */
+ avi_cfg.db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB;
+ avi_cfg.db1_active_info =
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
+ avi_cfg.db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO;
+ avi_cfg.db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0;
+ avi_cfg.db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO;
+ avi_cfg.db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO;
+ avi_cfg.db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME;
+ avi_cfg.db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO;
+ avi_cfg.db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601;
+ avi_cfg.db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT;
+ avi_cfg.db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO;
+ avi_cfg.db4_videocode = cfg->cm.code;
+ avi_cfg.db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO;
+ avi_cfg.db6_7_line_eoftop = 0;
+ avi_cfg.db8_9_line_sofbottom = 0;
+ avi_cfg.db10_11_pixel_eofleft = 0;
+ avi_cfg.db12_13_pixel_sofright = 0;
+
+ hdmi_core_aux_infoframe_avi_config(avi_cfg);
+
+ /* enable/repeat the infoframe */
+ repeat_cfg.avi_infoframe = HDMI_PACKETENABLE;
+ repeat_cfg.avi_infoframe_repeat = HDMI_PACKETREPEATON;
+ /* wakeup */
+ repeat_cfg.audio_pkt = HDMI_PACKETENABLE;
+ repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON;
+ hdmi_core_av_packet_config(repeat_cfg);
+}
+
+static void update_hdmi_timings(struct hdmi_config *cfg,
+ struct omap_video_timings *timings, int code)
+{
+ cfg->timings.timings.x_res = timings->x_res;
+ cfg->timings.timings.y_res = timings->y_res;
+ cfg->timings.timings.hbp = timings->hbp;
+ cfg->timings.timings.hfp = timings->hfp;
+ cfg->timings.timings.hsw = timings->hsw;
+ cfg->timings.timings.vbp = timings->vbp;
+ cfg->timings.timings.vfp = timings->vfp;
+ cfg->timings.timings.vsw = timings->vsw;
+ cfg->timings.timings.pixel_clock = timings->pixel_clock;
+ cfg->timings.vsync_pol = cea_vesa_timings[code].vsync_pol;
+ cfg->timings.hsync_pol = cea_vesa_timings[code].hsync_pol;
+}
+
+static void hdmi_compute_pll(unsigned long clkin, int phy,
+ int n, struct hdmi_pll_info *pi)
+{
+ unsigned long refclk;
+ u32 mf;
+
+ /*
+ * The input clock is pre-divided by N + 1;
+ * the output of the divider is the reference clock.
+ */
+ refclk = clkin / (n + 1);
+ pi->regn = n;
+
+ /*
+ * The multiplier is pixel_clk / ref_clk.
+ * Multiply by 100 to avoid losing the fractional part too early.
+ */
+ pi->regm = (phy * 100/(refclk))/100;
+ pi->regm2 = 1;
+
+ /*
+ * The fractional multiplier is the remainder left over after the
+ * integer multiplier, i.e. (phy - regm * refclk), scaled by
+ * 2^18 (262144) and divided by the reference clock.
+ */
+ mf = (phy - pi->regm * refclk) * 262144;
+ pi->regmf = mf/(refclk);
+
+ /*
+ * dcofreq should be set to 1 if the required pixel clock
+ * is greater than 1000 MHz.
+ */
+ pi->dcofreq = phy > 1000 * 100;
+ pi->regsd = ((pi->regm * clkin / 10) / ((n + 1) * 250) + 5) / 10;
+
+ DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf);
+ DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
+}
+
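+/*
+ * Worked example (illustrative), using the clkin = 3840 and n = 15
+ * values passed from hdmi_power_on() and a 720p pixel clock of
+ * phy = 74250:
+ *
+ *	refclk  = 3840 / 16 = 240
+ *	regm    = (74250 * 100 / 240) / 100 = 309
+ *	regmf   = ((74250 - 309 * 240) * 262144) / 240 = 98304
+ *	dcofreq = 0 (since 74250 <= 100000)
+ *	regsd   = ((309 * 3840 / 10) / (16 * 250) + 5) / 10 = 3
+ */
+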
+static void hdmi_enable_clocks(int enable)
+{
+ if (enable)
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK |
+ DSS_CLK_SYSCK | DSS_CLK_VIDFCK);
+ else
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK |
+ DSS_CLK_SYSCK | DSS_CLK_VIDFCK);
+}
+
+static int hdmi_power_on(struct omap_dss_device *dssdev)
+{
+ int r, code = 0;
+ struct hdmi_pll_info pll_data;
+ struct omap_video_timings *p;
+ int clkin, n, phy;
+
+ hdmi_enable_clocks(1);
+
+ dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0);
+
+ p = &dssdev->panel.timings;
+
+ DSSDBG("hdmi_power_on x_res= %d y_res = %d\n",
+ dssdev->panel.timings.x_res,
+ dssdev->panel.timings.y_res);
+
+ if (!hdmi.custom_set) {
+ DSSDBG("no custom timings set, reading EDID at power on\n");
+ hdmi_read_edid(p);
+ }
+ code = get_timings_index();
+ dssdev->panel.timings = cea_vesa_timings[code].timings;
+ update_hdmi_timings(&hdmi.cfg, p, code);
+
+ clkin = 3840; /* 38.4 MHz */
+ n = 15; /* fixed pre-divider (N) used for the PLL computation */
+ phy = p->pixel_clock;
+
+ hdmi_compute_pll(clkin, phy, n, &pll_data);
+
+ hdmi_wp_video_start(0);
+
+ /* config the PLL and PHY first */
+ r = hdmi_pll_program(&pll_data);
+ if (r) {
+ DSSDBG("Failed to lock PLL\n");
+ goto err;
+ }
+
+ r = hdmi_phy_init();
+ if (r) {
+ DSSDBG("Failed to start PHY\n");
+ goto err;
+ }
+
+ hdmi.cfg.cm.mode = hdmi.mode;
+ hdmi.cfg.cm.code = hdmi.code;
+ hdmi_basic_configure(&hdmi.cfg);
+
+ /* Make selection of HDMI in DSS */
+ dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
+
+ /* Select the PRCM clock as the DISPC clock source, to ensure that
+ * it is not the DSI PLL: the clock selected by the DSI PLL might not
+ * be sufficient for the selected resolution, which can also be
+ * changed dynamically by the user. This could be moved to a single
+ * location, e.g. the board file.
+ */
+ dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
+
+ /* bypass TV gamma table */
+ dispc_enable_gamma_table(0);
+
+ /* tv size */
+ dispc_set_digit_size(dssdev->panel.timings.x_res,
+ dssdev->panel.timings.y_res);
+
+ dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 1);
+
+ hdmi_wp_video_start(1);
+
+ return 0;
+err:
+ hdmi_enable_clocks(0);
+ return -EIO;
+}
+
+static void hdmi_power_off(struct omap_dss_device *dssdev)
+{
+ dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0);
+
+ hdmi_wp_video_start(0);
+ hdmi_phy_off();
+ hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
+ hdmi_enable_clocks(0);
+
+ hdmi.edid_set = 0;
+}
+
+int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ struct hdmi_cm cm;
+
+ cm = hdmi_get_code(timings);
+ if (cm.code == -1) {
+ DSSERR("Invalid timing entered\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev)
+{
+ struct hdmi_cm cm;
+
+ hdmi.custom_set = 1;
+ cm = hdmi_get_code(&dssdev->panel.timings);
+ hdmi.code = cm.code;
+ hdmi.mode = cm.mode;
+ omapdss_hdmi_display_enable(dssdev);
+ hdmi.custom_set = 0;
+}
+
+int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ DSSDBG("ENTER hdmi_display_enable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ r = omap_dss_start_device(dssdev);
+ if (r) {
+ DSSERR("failed to start device\n");
+ goto err0;
+ }
+
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r) {
+ DSSERR("failed to enable GPIOs\n");
+ goto err1;
+ }
+ }
+
+ r = hdmi_power_on(dssdev);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err2;
+ }
+
+ mutex_unlock(&hdmi.lock);
+ return 0;
+
+err2:
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+err1:
+ omap_dss_stop_device(dssdev);
+err0:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
+{
+ DSSDBG("Enter hdmi_display_disable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ hdmi_power_off(dssdev);
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ omap_dss_stop_device(dssdev);
+
+ mutex_unlock(&hdmi.lock);
+}
+
+/* HDMI HW IP initialisation */
+static int omapdss_hdmihw_probe(struct platform_device *pdev)
+{
+ struct resource *hdmi_mem;
+
+ hdmi.pdata = pdev->dev.platform_data;
+ hdmi.pdev = pdev;
+
+ mutex_init(&hdmi.lock);
+
+ hdmi_mem = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0);
+ if (!hdmi_mem) {
+ DSSERR("can't get IORESOURCE_MEM HDMI\n");
+ return -EINVAL;
+ }
+
+ /* Base address taken from platform */
+ hdmi.base_wp = ioremap(hdmi_mem->start, resource_size(hdmi_mem));
+ if (!hdmi.base_wp) {
+ DSSERR("can't ioremap WP\n");
+ return -ENOMEM;
+ }
+
+ hdmi_panel_init();
+
+ return 0;
+}
+
+static int omapdss_hdmihw_remove(struct platform_device *pdev)
+{
+ hdmi_panel_exit();
+
+ iounmap(hdmi.base_wp);
+
+ return 0;
+}
+
+static struct platform_driver omapdss_hdmihw_driver = {
+ .probe = omapdss_hdmihw_probe,
+ .remove = omapdss_hdmihw_remove,
+ .driver = {
+ .name = "omapdss_hdmi",
+ .owner = THIS_MODULE,
+ },
+};
+
+int hdmi_init_platform_driver(void)
+{
+ return platform_driver_register(&omapdss_hdmihw_driver);
+}
+
+void hdmi_uninit_platform_driver(void)
+{
+ platform_driver_unregister(&omapdss_hdmihw_driver);
+}
diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/omap2/dss/hdmi.h
new file mode 100644
index 00000000000..9887ab96da3
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi.h
@@ -0,0 +1,415 @@
+/*
+ * hdmi.h
+ *
+ * HDMI driver definition for TI OMAP4 processors.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _OMAP4_DSS_HDMI_H_
+#define _OMAP4_DSS_HDMI_H_
+
+#include <linux/string.h>
+#include <plat/display.h>
+
+#define HDMI_WP 0x0
+#define HDMI_CORE_SYS 0x400
+#define HDMI_CORE_AV 0x900
+#define HDMI_PLLCTRL 0x200
+#define HDMI_PHY 0x300
+
+struct hdmi_reg { u16 idx; };
+
+#define HDMI_REG(idx) ((const struct hdmi_reg) { idx })
+
+/* HDMI Wrapper */
+#define HDMI_WP_REG(idx) HDMI_REG(HDMI_WP + idx)
+
+#define HDMI_WP_REVISION HDMI_WP_REG(0x0)
+#define HDMI_WP_SYSCONFIG HDMI_WP_REG(0x10)
+#define HDMI_WP_IRQSTATUS_RAW HDMI_WP_REG(0x24)
+#define HDMI_WP_IRQSTATUS HDMI_WP_REG(0x28)
+#define HDMI_WP_PWR_CTRL HDMI_WP_REG(0x40)
+#define HDMI_WP_IRQENABLE_SET HDMI_WP_REG(0x2C)
+#define HDMI_WP_VIDEO_CFG HDMI_WP_REG(0x50)
+#define HDMI_WP_VIDEO_SIZE HDMI_WP_REG(0x60)
+#define HDMI_WP_VIDEO_TIMING_H HDMI_WP_REG(0x68)
+#define HDMI_WP_VIDEO_TIMING_V HDMI_WP_REG(0x6C)
+#define HDMI_WP_WP_CLK HDMI_WP_REG(0x70)
+
+/* HDMI IP Core System */
+#define HDMI_CORE_SYS_REG(idx) HDMI_REG(HDMI_CORE_SYS + idx)
+
+#define HDMI_CORE_SYS_VND_IDL HDMI_CORE_SYS_REG(0x0)
+#define HDMI_CORE_SYS_DEV_IDL HDMI_CORE_SYS_REG(0x8)
+#define HDMI_CORE_SYS_DEV_IDH HDMI_CORE_SYS_REG(0xC)
+#define HDMI_CORE_SYS_DEV_REV HDMI_CORE_SYS_REG(0x10)
+#define HDMI_CORE_SYS_SRST HDMI_CORE_SYS_REG(0x14)
+#define HDMI_CORE_CTRL1 HDMI_CORE_SYS_REG(0x20)
+#define HDMI_CORE_SYS_SYS_STAT HDMI_CORE_SYS_REG(0x24)
+#define HDMI_CORE_SYS_VID_ACEN HDMI_CORE_SYS_REG(0x124)
+#define HDMI_CORE_SYS_VID_MODE HDMI_CORE_SYS_REG(0x128)
+#define HDMI_CORE_SYS_INTR_STATE HDMI_CORE_SYS_REG(0x1C0)
+#define HDMI_CORE_SYS_INTR1 HDMI_CORE_SYS_REG(0x1C4)
+#define HDMI_CORE_SYS_INTR2 HDMI_CORE_SYS_REG(0x1C8)
+#define HDMI_CORE_SYS_INTR3 HDMI_CORE_SYS_REG(0x1CC)
+#define HDMI_CORE_SYS_INTR4 HDMI_CORE_SYS_REG(0x1D0)
+#define HDMI_CORE_SYS_UMASK1 HDMI_CORE_SYS_REG(0x1D4)
+#define HDMI_CORE_SYS_TMDS_CTRL HDMI_CORE_SYS_REG(0x208)
+#define HDMI_CORE_SYS_DE_DLY HDMI_CORE_SYS_REG(0xC8)
+#define HDMI_CORE_SYS_DE_CTRL HDMI_CORE_SYS_REG(0xCC)
+#define HDMI_CORE_SYS_DE_TOP HDMI_CORE_SYS_REG(0xD0)
+#define HDMI_CORE_SYS_DE_CNTL HDMI_CORE_SYS_REG(0xD8)
+#define HDMI_CORE_SYS_DE_CNTH HDMI_CORE_SYS_REG(0xDC)
+#define HDMI_CORE_SYS_DE_LINL HDMI_CORE_SYS_REG(0xE0)
+#define HDMI_CORE_SYS_DE_LINH_1 HDMI_CORE_SYS_REG(0xE4)
+#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC 0x1
+#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC 0x1
+#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1
+#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE 0x1
+
+/* HDMI DDC E-DID */
+#define HDMI_CORE_DDC_CMD HDMI_CORE_SYS_REG(0x3CC)
+#define HDMI_CORE_DDC_STATUS HDMI_CORE_SYS_REG(0x3C8)
+#define HDMI_CORE_DDC_ADDR HDMI_CORE_SYS_REG(0x3B4)
+#define HDMI_CORE_DDC_OFFSET HDMI_CORE_SYS_REG(0x3BC)
+#define HDMI_CORE_DDC_COUNT1 HDMI_CORE_SYS_REG(0x3C0)
+#define HDMI_CORE_DDC_COUNT2 HDMI_CORE_SYS_REG(0x3C4)
+#define HDMI_CORE_DDC_DATA HDMI_CORE_SYS_REG(0x3D0)
+#define HDMI_CORE_DDC_SEGM HDMI_CORE_SYS_REG(0x3B8)
+
+/* HDMI IP Core Audio Video */
+#define HDMI_CORE_AV_REG(idx) HDMI_REG(HDMI_CORE_AV + idx)
+
+#define HDMI_CORE_AV_HDMI_CTRL HDMI_CORE_AV_REG(0xBC)
+#define HDMI_CORE_AV_DPD HDMI_CORE_AV_REG(0xF4)
+#define HDMI_CORE_AV_PB_CTRL1 HDMI_CORE_AV_REG(0xF8)
+#define HDMI_CORE_AV_PB_CTRL2 HDMI_CORE_AV_REG(0xFC)
+#define HDMI_CORE_AV_AVI_TYPE HDMI_CORE_AV_REG(0x100)
+#define HDMI_CORE_AV_AVI_VERS HDMI_CORE_AV_REG(0x104)
+#define HDMI_CORE_AV_AVI_LEN HDMI_CORE_AV_REG(0x108)
+#define HDMI_CORE_AV_AVI_CHSUM HDMI_CORE_AV_REG(0x10C)
+#define HDMI_CORE_AV_AVI_DBYTE(n) HDMI_CORE_AV_REG(n * 4 + 0x110)
+#define HDMI_CORE_AV_AVI_DBYTE_NELEMS HDMI_CORE_AV_REG(15)
+#define HDMI_CORE_AV_SPD_DBYTE HDMI_CORE_AV_REG(0x190)
+#define HDMI_CORE_AV_SPD_DBYTE_NELEMS HDMI_CORE_AV_REG(27)
+#define HDMI_CORE_AV_MPEG_DBYTE HDMI_CORE_AV_REG(0x290)
+#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS HDMI_CORE_AV_REG(27)
+#define HDMI_CORE_AV_GEN_DBYTE HDMI_CORE_AV_REG(0x300)
+#define HDMI_CORE_AV_GEN_DBYTE_NELEMS HDMI_CORE_AV_REG(31)
+#define HDMI_CORE_AV_GEN2_DBYTE HDMI_CORE_AV_REG(0x380)
+#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS HDMI_CORE_AV_REG(31)
+#define HDMI_CORE_AV_ACR_CTRL HDMI_CORE_AV_REG(0x4)
+#define HDMI_CORE_AV_FREQ_SVAL HDMI_CORE_AV_REG(0x8)
+#define HDMI_CORE_AV_N_SVAL1 HDMI_CORE_AV_REG(0xC)
+#define HDMI_CORE_AV_N_SVAL2 HDMI_CORE_AV_REG(0x10)
+#define HDMI_CORE_AV_N_SVAL3 HDMI_CORE_AV_REG(0x14)
+#define HDMI_CORE_AV_CTS_SVAL1 HDMI_CORE_AV_REG(0x18)
+#define HDMI_CORE_AV_CTS_SVAL2 HDMI_CORE_AV_REG(0x1C)
+#define HDMI_CORE_AV_CTS_SVAL3 HDMI_CORE_AV_REG(0x20)
+#define HDMI_CORE_AV_CTS_HVAL1 HDMI_CORE_AV_REG(0x24)
+#define HDMI_CORE_AV_CTS_HVAL2 HDMI_CORE_AV_REG(0x28)
+#define HDMI_CORE_AV_CTS_HVAL3 HDMI_CORE_AV_REG(0x2C)
+#define HDMI_CORE_AV_AUD_MODE HDMI_CORE_AV_REG(0x50)
+#define HDMI_CORE_AV_SPDIF_CTRL HDMI_CORE_AV_REG(0x54)
+#define HDMI_CORE_AV_HW_SPDIF_FS HDMI_CORE_AV_REG(0x60)
+#define HDMI_CORE_AV_SWAP_I2S HDMI_CORE_AV_REG(0x64)
+#define HDMI_CORE_AV_SPDIF_ERTH HDMI_CORE_AV_REG(0x6C)
+#define HDMI_CORE_AV_I2S_IN_MAP HDMI_CORE_AV_REG(0x70)
+#define HDMI_CORE_AV_I2S_IN_CTRL HDMI_CORE_AV_REG(0x74)
+#define HDMI_CORE_AV_I2S_CHST0 HDMI_CORE_AV_REG(0x78)
+#define HDMI_CORE_AV_I2S_CHST1 HDMI_CORE_AV_REG(0x7C)
+#define HDMI_CORE_AV_I2S_CHST2 HDMI_CORE_AV_REG(0x80)
+#define HDMI_CORE_AV_I2S_CHST4 HDMI_CORE_AV_REG(0x84)
+#define HDMI_CORE_AV_I2S_CHST5 HDMI_CORE_AV_REG(0x88)
+#define HDMI_CORE_AV_ASRC HDMI_CORE_AV_REG(0x8C)
+#define HDMI_CORE_AV_I2S_IN_LEN HDMI_CORE_AV_REG(0x90)
+#define HDMI_CORE_AV_HDMI_CTRL HDMI_CORE_AV_REG(0xBC)
+#define HDMI_CORE_AV_AUDO_TXSTAT HDMI_CORE_AV_REG(0xC0)
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_1 HDMI_CORE_AV_REG(0xCC)
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_2 HDMI_CORE_AV_REG(0xD0)
+#define HDMI_CORE_AV_AUD_PAR_BUSCLK_3 HDMI_CORE_AV_REG(0xD4)
+#define HDMI_CORE_AV_TEST_TXCTRL HDMI_CORE_AV_REG(0xF0)
+#define HDMI_CORE_AV_DPD HDMI_CORE_AV_REG(0xF4)
+#define HDMI_CORE_AV_PB_CTRL1 HDMI_CORE_AV_REG(0xF8)
+#define HDMI_CORE_AV_PB_CTRL2 HDMI_CORE_AV_REG(0xFC)
+#define HDMI_CORE_AV_AVI_TYPE HDMI_CORE_AV_REG(0x100)
+#define HDMI_CORE_AV_AVI_VERS HDMI_CORE_AV_REG(0x104)
+#define HDMI_CORE_AV_AVI_LEN HDMI_CORE_AV_REG(0x108)
+#define HDMI_CORE_AV_AVI_CHSUM HDMI_CORE_AV_REG(0x10C)
+#define HDMI_CORE_AV_SPD_TYPE HDMI_CORE_AV_REG(0x180)
+#define HDMI_CORE_AV_SPD_VERS HDMI_CORE_AV_REG(0x184)
+#define HDMI_CORE_AV_SPD_LEN HDMI_CORE_AV_REG(0x188)
+#define HDMI_CORE_AV_SPD_CHSUM HDMI_CORE_AV_REG(0x18C)
+#define HDMI_CORE_AV_MPEG_TYPE HDMI_CORE_AV_REG(0x280)
+#define HDMI_CORE_AV_MPEG_VERS HDMI_CORE_AV_REG(0x284)
+#define HDMI_CORE_AV_MPEG_LEN HDMI_CORE_AV_REG(0x288)
+#define HDMI_CORE_AV_MPEG_CHSUM HDMI_CORE_AV_REG(0x28C)
+#define HDMI_CORE_AV_CP_BYTE1 HDMI_CORE_AV_REG(0x37C)
+#define HDMI_CORE_AV_CEC_ADDR_ID HDMI_CORE_AV_REG(0x3FC)
+#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE 0x4
+#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE 0x4
+#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE 0x4
+#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE 0x4
+
+/* PLL */
+#define HDMI_PLL_REG(idx) HDMI_REG(HDMI_PLLCTRL + idx)
+
+#define PLLCTRL_PLL_CONTROL HDMI_PLL_REG(0x0)
+#define PLLCTRL_PLL_STATUS HDMI_PLL_REG(0x4)
+#define PLLCTRL_PLL_GO HDMI_PLL_REG(0x8)
+#define PLLCTRL_CFG1 HDMI_PLL_REG(0xC)
+#define PLLCTRL_CFG2 HDMI_PLL_REG(0x10)
+#define PLLCTRL_CFG3 HDMI_PLL_REG(0x14)
+#define PLLCTRL_CFG4 HDMI_PLL_REG(0x20)
+
+/* HDMI PHY */
+#define HDMI_PHY_REG(idx) HDMI_REG(HDMI_PHY + idx)
+
+#define HDMI_TXPHY_TX_CTRL HDMI_PHY_REG(0x0)
+#define HDMI_TXPHY_DIGITAL_CTRL HDMI_PHY_REG(0x4)
+#define HDMI_TXPHY_POWER_CTRL HDMI_PHY_REG(0x8)
+#define HDMI_TXPHY_PAD_CFG_CTRL HDMI_PHY_REG(0xC)
+
+/* HDMI EDID Length */
+#define HDMI_EDID_MAX_LENGTH 256
+#define EDID_TIMING_DESCRIPTOR_SIZE 0x12
+#define EDID_DESCRIPTOR_BLOCK0_ADDRESS 0x36
+#define EDID_DESCRIPTOR_BLOCK1_ADDRESS 0x80
+#define EDID_SIZE_BLOCK0_TIMING_DESCRIPTOR 4
+#define EDID_SIZE_BLOCK1_TIMING_DESCRIPTOR 4
+
+#define OMAP_HDMI_TIMINGS_NB 34
+
+#define REG_FLD_MOD(idx, val, start, end) \
+ hdmi_write_reg(idx, FLD_MOD(hdmi_read_reg(idx), val, start, end))
+#define REG_GET(idx, start, end) \
+ FLD_GET(hdmi_read_reg(idx), start, end)
+
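+/*
+ * For example (illustrative): REG_FLD_MOD(HDMI_WP_SYSCONFIG, 0x1, 0, 0)
+ * expands to a read-modify-write that sets bit 0 of HDMI_WP_SYSCONFIG,
+ * and REG_GET(HDMI_WP_PWR_CTRL, 5, 4) extracts bits [5:4] of that
+ * register. Both rely on the FLD_MOD()/FLD_GET() helpers used
+ * throughout the DSS driver.
+ */
+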
+/* HDMI timing structure */
+struct hdmi_timings {
+ struct omap_video_timings timings;
+ int vsync_pol;
+ int hsync_pol;
+};
+
+enum hdmi_phy_pwr {
+ HDMI_PHYPWRCMD_OFF = 0,
+ HDMI_PHYPWRCMD_LDOON = 1,
+ HDMI_PHYPWRCMD_TXON = 2
+};
+
+enum hdmi_pll_pwr {
+ HDMI_PLLPWRCMD_ALLOFF = 0,
+ HDMI_PLLPWRCMD_PLLONLY = 1,
+ HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2,
+ HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3
+};
+
+enum hdmi_clk_refsel {
+ HDMI_REFSEL_PCLK = 0,
+ HDMI_REFSEL_REF1 = 1,
+ HDMI_REFSEL_REF2 = 2,
+ HDMI_REFSEL_SYSCLK = 3
+};
+
+enum hdmi_core_inputbus_width {
+ HDMI_INPUT_8BIT = 0,
+ HDMI_INPUT_10BIT = 1,
+ HDMI_INPUT_12BIT = 2
+};
+
+enum hdmi_core_dither_trunc {
+ HDMI_OUTPUTTRUNCATION_8BIT = 0,
+ HDMI_OUTPUTTRUNCATION_10BIT = 1,
+ HDMI_OUTPUTTRUNCATION_12BIT = 2,
+ HDMI_OUTPUTDITHER_8BIT = 3,
+ HDMI_OUTPUTDITHER_10BIT = 4,
+ HDMI_OUTPUTDITHER_12BIT = 5
+};
+
+enum hdmi_core_deepcolor_ed {
+ HDMI_DEEPCOLORPACKECTDISABLE = 0,
+ HDMI_DEEPCOLORPACKECTENABLE = 1
+};
+
+enum hdmi_core_packet_mode {
+ HDMI_PACKETMODERESERVEDVALUE = 0,
+ HDMI_PACKETMODE24BITPERPIXEL = 4,
+ HDMI_PACKETMODE30BITPERPIXEL = 5,
+ HDMI_PACKETMODE36BITPERPIXEL = 6,
+ HDMI_PACKETMODE48BITPERPIXEL = 7
+};
+
+enum hdmi_core_hdmi_dvi {
+ HDMI_DVI = 0,
+ HDMI_HDMI = 1
+};
+
+enum hdmi_core_tclkselclkmult {
+ HDMI_FPLL05IDCK = 0,
+ HDMI_FPLL10IDCK = 1,
+ HDMI_FPLL20IDCK = 2,
+ HDMI_FPLL40IDCK = 3
+};
+
+enum hdmi_core_packet_ctrl {
+ HDMI_PACKETENABLE = 1,
+ HDMI_PACKETDISABLE = 0,
+ HDMI_PACKETREPEATON = 1,
+ HDMI_PACKETREPEATOFF = 0
+};
+
+/* INFOFRAME_AVI_ definitions */
+enum hdmi_core_infoframe {
+ HDMI_INFOFRAME_AVI_DB1Y_RGB = 0,
+ HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1,
+ HDMI_INFOFRAME_AVI_DB1Y_YUV444 = 2,
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF = 0,
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_ON = 1,
+ HDMI_INFOFRAME_AVI_DB1B_NO = 0,
+ HDMI_INFOFRAME_AVI_DB1B_VERT = 1,
+ HDMI_INFOFRAME_AVI_DB1B_HORI = 2,
+ HDMI_INFOFRAME_AVI_DB1B_VERTHORI = 3,
+ HDMI_INFOFRAME_AVI_DB1S_0 = 0,
+ HDMI_INFOFRAME_AVI_DB1S_1 = 1,
+ HDMI_INFOFRAME_AVI_DB1S_2 = 2,
+ HDMI_INFOFRAME_AVI_DB2C_NO = 0,
+ HDMI_INFOFRAME_AVI_DB2C_ITU601 = 1,
+ HDMI_INFOFRAME_AVI_DB2C_ITU709 = 2,
+ HDMI_INFOFRAME_AVI_DB2C_EC_EXTENDED = 3,
+ HDMI_INFOFRAME_AVI_DB2M_NO = 0,
+ HDMI_INFOFRAME_AVI_DB2M_43 = 1,
+ HDMI_INFOFRAME_AVI_DB2M_169 = 2,
+ HDMI_INFOFRAME_AVI_DB2R_SAME = 8,
+ HDMI_INFOFRAME_AVI_DB2R_43 = 9,
+ HDMI_INFOFRAME_AVI_DB2R_169 = 10,
+ HDMI_INFOFRAME_AVI_DB2R_149 = 11,
+ HDMI_INFOFRAME_AVI_DB3ITC_NO = 0,
+ HDMI_INFOFRAME_AVI_DB3ITC_YES = 1,
+ HDMI_INFOFRAME_AVI_DB3EC_XVYUV601 = 0,
+ HDMI_INFOFRAME_AVI_DB3EC_XVYUV709 = 1,
+ HDMI_INFOFRAME_AVI_DB3Q_DEFAULT = 0,
+ HDMI_INFOFRAME_AVI_DB3Q_LR = 1,
+ HDMI_INFOFRAME_AVI_DB3Q_FR = 2,
+ HDMI_INFOFRAME_AVI_DB3SC_NO = 0,
+ HDMI_INFOFRAME_AVI_DB3SC_HORI = 1,
+ HDMI_INFOFRAME_AVI_DB3SC_VERT = 2,
+ HDMI_INFOFRAME_AVI_DB3SC_HORIVERT = 3,
+ HDMI_INFOFRAME_AVI_DB5PR_NO = 0,
+ HDMI_INFOFRAME_AVI_DB5PR_2 = 1,
+ HDMI_INFOFRAME_AVI_DB5PR_3 = 2,
+ HDMI_INFOFRAME_AVI_DB5PR_4 = 3,
+ HDMI_INFOFRAME_AVI_DB5PR_5 = 4,
+ HDMI_INFOFRAME_AVI_DB5PR_6 = 5,
+ HDMI_INFOFRAME_AVI_DB5PR_7 = 6,
+ HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
+ HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
+ HDMI_INFOFRAME_AVI_DB5PR_10 = 9
+};
+
+enum hdmi_packing_mode {
+ HDMI_PACK_10b_RGB_YUV444 = 0,
+ HDMI_PACK_24b_RGB_YUV444_YUV422 = 1,
+ HDMI_PACK_20b_YUV422 = 2,
+ HDMI_PACK_ALREADYPACKED = 7
+};
+
+struct hdmi_core_video_config {
+ enum hdmi_core_inputbus_width ip_bus_width;
+ enum hdmi_core_dither_trunc op_dither_truc;
+ enum hdmi_core_deepcolor_ed deep_color_pkt;
+ enum hdmi_core_packet_mode pkt_mode;
+ enum hdmi_core_hdmi_dvi hdmi_dvi;
+ enum hdmi_core_tclkselclkmult tclk_sel_clkmult;
+};
+
+/*
+ * Refer to section 8.2 in HDMI 1.3 specification for
+ * details about infoframe databytes
+ */
+struct hdmi_core_infoframe_avi {
+ u8 db1_format;
+ /* Y0, Y1 rgb,yCbCr */
+ u8 db1_active_info;
+ /* A0 Active information Present */
+ u8 db1_bar_info_dv;
+ /* B0, B1 Bar info data valid */
+ u8 db1_scan_info;
+ /* S0, S1 scan information */
+ u8 db2_colorimetry;
+ /* C0, C1 colorimetry */
+ u8 db2_aspect_ratio;
+ /* M0, M1 Aspect ratio (4:3, 16:9) */
+ u8 db2_active_fmt_ar;
+ /* R0...R3 Active format aspect ratio */
+ u8 db3_itc;
+ /* ITC IT content. */
+ u8 db3_ec;
+ /* EC0, EC1, EC2 Extended colorimetry */
+ u8 db3_q_range;
+ /* Q1, Q0 Quantization range */
+ u8 db3_nup_scaling;
+ /* SC1, SC0 Non-uniform picture scaling */
+ u8 db4_videocode;
+ /* VIC0..6 Video format identification */
+ u8 db5_pixel_repeat;
+ /* PR0..PR3 Pixel repetition factor */
+ u16 db6_7_line_eoftop;
+ /* Line number end of top bar */
+ u16 db8_9_line_sofbottom;
+ /* Line number start of bottom bar */
+ u16 db10_11_pixel_eofleft;
+ /* Pixel number end of left bar */
+ u16 db12_13_pixel_sofright;
+ /* Pixel number start of right bar */
+};
+
+struct hdmi_core_packet_enable_repeat {
+ u32 audio_pkt;
+ u32 audio_pkt_repeat;
+ u32 avi_infoframe;
+ u32 avi_infoframe_repeat;
+ u32 gen_cntrl_pkt;
+ u32 gen_cntrl_pkt_repeat;
+ u32 generic_pkt;
+ u32 generic_pkt_repeat;
+};
+
+struct hdmi_video_format {
+ enum hdmi_packing_mode packing_mode;
+ u32 y_res; /* lines per panel */
+ u32 x_res; /* pixels per line */
+};
+
+struct hdmi_video_interface {
+ int vsp; /* Vsync polarity */
+ int hsp; /* Hsync polarity */
+ int interlacing;
+ int tm; /* Timing mode */
+};
+
+struct hdmi_cm {
+ int code;
+ int mode;
+};
+
+struct hdmi_config {
+ struct hdmi_timings timings;
+ u16 interlace;
+ struct hdmi_cm cm;
+};
+
+#endif
diff --git a/drivers/video/omap2/dss/hdmi_omap4_panel.c b/drivers/video/omap2/dss/hdmi_omap4_panel.c
new file mode 100644
index 00000000000..ffb5de94131
--- /dev/null
+++ b/drivers/video/omap2/dss/hdmi_omap4_panel.c
@@ -0,0 +1,222 @@
+/*
+ * hdmi_omap4_panel.c
+ *
+ * HDMI library support functions for TI OMAP4 processors.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Mythri P k <mythripk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <plat/display.h>
+
+#include "dss.h"
+
+static struct {
+ struct mutex hdmi_lock;
+} hdmi;
+
+
+static int hdmi_panel_probe(struct omap_dss_device *dssdev)
+{
+ DSSDBG("ENTER hdmi_panel_probe\n");
+
+ dssdev->panel.config = OMAP_DSS_LCD_TFT |
+ OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IHS;
+
+ /*
+ * Initialize the timings to 640 x 480.
+ * This is only for the framebuffer update, not for setting the TV
+ * timings; the TV timings are set only on enable.
+ */
+ dssdev->panel.timings.x_res = 640;
+ dssdev->panel.timings.y_res = 480;
+
+ DSSDBG("hdmi_panel_probe x_res= %d y_res = %d\n",
+ dssdev->panel.timings.x_res,
+ dssdev->panel.timings.y_res);
+ return 0;
+}
+
+static void hdmi_panel_remove(struct omap_dss_device *dssdev)
+{
+
+}
+
+static int hdmi_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+ DSSDBG("ENTER hdmi_panel_enable\n");
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ r = omapdss_hdmi_display_enable(dssdev);
+ if (r) {
+ DSSERR("failed to power on\n");
+ goto err;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+err:
+ mutex_unlock(&hdmi.hdmi_lock);
+
+ return r;
+}
+
+static void hdmi_panel_disable(struct omap_dss_device *dssdev)
+{
+ mutex_lock(&hdmi.hdmi_lock);
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ omapdss_hdmi_display_disable(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+ mutex_unlock(&hdmi.hdmi_lock);
+}
+
+static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+ omapdss_hdmi_display_disable(dssdev);
+
+err:
+ mutex_unlock(&hdmi.hdmi_lock);
+
+ return r;
+}
+
+static int hdmi_panel_resume(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ r = omapdss_hdmi_display_enable(dssdev);
+ if (r) {
+ DSSERR("failed to power on\n");
+ goto err;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+err:
+ mutex_unlock(&hdmi.hdmi_lock);
+
+ return r;
+}
+
+static void hdmi_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ mutex_lock(&hdmi.hdmi_lock);
+
+ *timings = dssdev->panel.timings;
+
+ mutex_unlock(&hdmi.hdmi_lock);
+}
+
+static void hdmi_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ DSSDBG("hdmi_set_timings\n");
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ dssdev->panel.timings = *timings;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ /* turn the HDMI off and on to switch to the new timings */
+ omapdss_hdmi_display_disable(dssdev);
+ omapdss_hdmi_display_set_timing(dssdev);
+ }
+
+ mutex_unlock(&hdmi.hdmi_lock);
+}
+
+static int hdmi_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ int r = 0;
+
+ DSSDBG("hdmi_check_timings\n");
+
+ mutex_lock(&hdmi.hdmi_lock);
+
+ r = omapdss_hdmi_display_check_timing(dssdev, timings);
+ if (r) {
+ DSSERR("Timing cannot be applied\n");
+ goto err;
+ }
+err:
+ mutex_unlock(&hdmi.hdmi_lock);
+ return r;
+}
+
+static struct omap_dss_driver hdmi_driver = {
+ .probe = hdmi_panel_probe,
+ .remove = hdmi_panel_remove,
+ .enable = hdmi_panel_enable,
+ .disable = hdmi_panel_disable,
+ .suspend = hdmi_panel_suspend,
+ .resume = hdmi_panel_resume,
+ .get_timings = hdmi_get_timings,
+ .set_timings = hdmi_set_timings,
+ .check_timings = hdmi_check_timings,
+ .driver = {
+ .name = "hdmi_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
+int hdmi_panel_init(void)
+{
+ mutex_init(&hdmi.hdmi_lock);
+
+ omap_dss_register_driver(&hdmi_driver);
+
+ return 0;
+}
+
+void hdmi_panel_exit(void)
+{
+ omap_dss_unregister_driver(&hdmi_driver);
+}
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 172d4e69730..bcd37ec8695 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -515,6 +515,8 @@ static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) {
irq = DISPC_IRQ_EVSYNC_ODD;
+ } else if (mgr->device->type == OMAP_DISPLAY_TYPE_HDMI) {
+ irq = DISPC_IRQ_EVSYNC_EVEN;
} else {
if (mgr->id == OMAP_DSS_CHANNEL_LCD)
irq = DISPC_IRQ_VSYNC;
@@ -536,7 +538,8 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return 0;
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
+ if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
+ || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
} else {
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
@@ -613,7 +616,8 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return 0;
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
+ if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
+ || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
} else {
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
@@ -1377,6 +1381,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
case OMAP_DISPLAY_TYPE_DBI:
case OMAP_DISPLAY_TYPE_SDI:
case OMAP_DISPLAY_TYPE_VENC:
+ case OMAP_DISPLAY_TYPE_HDMI:
default_get_overlay_fifo_thresholds(ovl->id, size,
&oc->burst_size, &oc->fifo_low,
&oc->fifo_high);
@@ -1394,7 +1399,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
}
r = 0;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
if (!dss_cache.irq_enabled) {
u32 mask;
@@ -1407,7 +1412,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
dss_cache.irq_enabled = true;
}
configure_dispc();
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
spin_unlock_irqrestore(&dss_cache.lock, flags);
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 456efef03c2..f1aca6d0401 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -490,7 +490,7 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
ovl->manager = mgr;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
/* XXX: on manual update display, in auto update mode, a bug happens
* here. When an overlay is first enabled on LCD, then it's disabled,
* and the manager is changed to TV, we sometimes get SYNC_LOST_DIGIT
@@ -499,7 +499,7 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
* but I don't understand how or why. */
msleep(40);
dispc_set_channel_out(ovl->id, mgr->id);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
return 0;
}
@@ -679,7 +679,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
lcd2_mgr->set_device(lcd2_mgr, dssdev);
mgr = lcd2_mgr;
}
- } else if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) {
+ } else if (dssdev->type != OMAP_DISPLAY_TYPE_VENC
+ && dssdev->type != OMAP_DISPLAY_TYPE_HDMI) {
if (!lcd_mgr->device || force) {
if (lcd_mgr->device)
lcd_mgr->unset_device(lcd_mgr);
@@ -688,7 +689,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
}
}
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
+ if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
+ || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
if (!tv_mgr->device || force) {
if (tv_mgr->device)
tv_mgr->unset_device(tv_mgr);
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 10a2ffe0288..5ea17f49c61 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -36,8 +36,6 @@
#include <plat/display.h>
#include "dss.h"
-#define RFBI_BASE 0x48050800
-
struct rfbi_reg { u16 idx; };
#define RFBI_REG(idx) ((const struct rfbi_reg) { idx })
@@ -100,6 +98,7 @@ static int rfbi_convert_timings(struct rfbi_timings *t);
static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div);
static struct {
+ struct platform_device *pdev;
void __iomem *base;
unsigned long l4_khz;
@@ -142,9 +141,9 @@ static inline u32 rfbi_read_reg(const struct rfbi_reg idx)
static void rfbi_enable_clocks(bool enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
}
void omap_rfbi_write_command(const void *buf, u32 len)
@@ -497,7 +496,7 @@ unsigned long rfbi_get_max_tx_rate(void)
};
l4_rate = rfbi.l4_khz / 1000;
- dss1_rate = dss_clk_get_rate(DSS_CLK_FCK1) / 1000000;
+ dss1_rate = dss_clk_get_rate(DSS_CLK_FCK) / 1000000;
for (i = 0; i < ARRAY_SIZE(ftab); i++) {
/* Use a window instead of an exact match, to account
@@ -922,7 +921,7 @@ void rfbi_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
DUMPREG(RFBI_REVISION);
DUMPREG(RFBI_SYSCONFIG);
@@ -953,54 +952,10 @@ void rfbi_dump_regs(struct seq_file *s)
DUMPREG(RFBI_VSYNC_WIDTH);
DUMPREG(RFBI_HSYNC_WIDTH);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
#undef DUMPREG
}
-int rfbi_init(void)
-{
- u32 rev;
- u32 l;
-
- spin_lock_init(&rfbi.cmd_lock);
-
- init_completion(&rfbi.cmd_done);
- atomic_set(&rfbi.cmd_fifo_full, 0);
- atomic_set(&rfbi.cmd_pending, 0);
-
- rfbi.base = ioremap(RFBI_BASE, SZ_256);
- if (!rfbi.base) {
- DSSERR("can't ioremap RFBI\n");
- return -ENOMEM;
- }
-
- rfbi_enable_clocks(1);
-
- msleep(10);
-
- rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000;
-
- /* Enable autoidle and smart-idle */
- l = rfbi_read_reg(RFBI_SYSCONFIG);
- l |= (1 << 0) | (2 << 3);
- rfbi_write_reg(RFBI_SYSCONFIG, l);
-
- rev = rfbi_read_reg(RFBI_REVISION);
- printk(KERN_INFO "OMAP RFBI rev %d.%d\n",
- FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
-
- rfbi_enable_clocks(0);
-
- return 0;
-}
-
-void rfbi_exit(void)
-{
- DSSDBG("rfbi_exit\n");
-
- iounmap(rfbi.base);
-}
-
int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
{
int r;
@@ -1056,3 +1011,74 @@ int rfbi_init_display(struct omap_dss_device *dssdev)
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
return 0;
}
+
+/* RFBI HW IP initialisation */
+static int omap_rfbihw_probe(struct platform_device *pdev)
+{
+ u32 rev;
+ u32 l;
+ struct resource *rfbi_mem;
+
+ rfbi.pdev = pdev;
+
+ spin_lock_init(&rfbi.cmd_lock);
+
+ init_completion(&rfbi.cmd_done);
+ atomic_set(&rfbi.cmd_fifo_full, 0);
+ atomic_set(&rfbi.cmd_pending, 0);
+
+ rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0);
+ if (!rfbi_mem) {
+ DSSERR("can't get IORESOURCE_MEM RFBI\n");
+ return -EINVAL;
+ }
+ rfbi.base = ioremap(rfbi_mem->start, resource_size(rfbi_mem));
+ if (!rfbi.base) {
+ DSSERR("can't ioremap RFBI\n");
+ return -ENOMEM;
+ }
+
+ rfbi_enable_clocks(1);
+
+ msleep(10);
+
+ rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000;
+
+ /* Enable autoidle and smart-idle */
+ l = rfbi_read_reg(RFBI_SYSCONFIG);
+ l |= (1 << 0) | (2 << 3);
+ rfbi_write_reg(RFBI_SYSCONFIG, l);
+
+ rev = rfbi_read_reg(RFBI_REVISION);
+ dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ rfbi_enable_clocks(0);
+
+ return 0;
+}
+
+static int omap_rfbihw_remove(struct platform_device *pdev)
+{
+ iounmap(rfbi.base);
+ return 0;
+}
+
+static struct platform_driver omap_rfbihw_driver = {
+ .probe = omap_rfbihw_probe,
+ .remove = omap_rfbihw_remove,
+ .driver = {
+ .name = "omapdss_rfbi",
+ .owner = THIS_MODULE,
+ },
+};
+
+int rfbi_init_platform_driver(void)
+{
+ return platform_driver_register(&omap_rfbihw_driver);
+}
+
+void rfbi_uninit_platform_driver(void)
+{
+ platform_driver_unregister(&omap_rfbihw_driver);
+}
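The RFBI conversion above (and the VENC conversion below) replaces a hardcoded register base with a platform driver probe that looks up its memory region at runtime. A minimal sketch of that probe pattern follows; the omap_foohw names are hypothetical and make no claim about the real omapdss platform data:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>

static void __iomem *foo_base;

static int omap_foohw_probe(struct platform_device *pdev)
{
	struct resource *mem;

	/* the base address comes from the registered resource,
	 * not from a #define in the driver */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -EINVAL;

	foo_base = ioremap(mem->start, resource_size(mem));
	if (!foo_base)
		return -ENOMEM;

	return 0;
}

static int omap_foohw_remove(struct platform_device *pdev)
{
	iounmap(foo_base);
	return 0;
}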
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index b64adf7dfc8..54a53e64818 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -30,7 +30,6 @@
#include "dss.h"
static struct {
- bool skip_init;
bool update_enabled;
struct regulator *vdds_sdi_reg;
} sdi;
@@ -68,9 +67,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err1;
- /* In case of skip_init sdi_init has already enabled the clocks */
- if (!sdi.skip_init)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
sdi_basic_init(dssdev);
@@ -80,14 +77,8 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
dssdev->panel.acbi, dssdev->panel.acb);
- if (!sdi.skip_init) {
- r = dss_calc_clock_div(1, t->pixel_clock * 1000,
- &dss_cinfo, &dispc_cinfo);
- } else {
- r = dss_get_clock_div(&dss_cinfo);
- r = dispc_get_clock_div(dssdev->manager->id, &dispc_cinfo);
- }
-
+ r = dss_calc_clock_div(1, t->pixel_clock * 1000,
+ &dss_cinfo, &dispc_cinfo);
if (r)
goto err2;
@@ -116,21 +107,17 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err2;
- if (!sdi.skip_init) {
- dss_sdi_init(dssdev->phy.sdi.datapairs);
- r = dss_sdi_enable();
- if (r)
- goto err1;
- mdelay(2);
- }
+ dss_sdi_init(dssdev->phy.sdi.datapairs);
+ r = dss_sdi_enable();
+ if (r)
+ goto err1;
+ mdelay(2);
dssdev->manager->enable(dssdev->manager);
- sdi.skip_init = 0;
-
return 0;
err2:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
regulator_disable(sdi.vdds_sdi_reg);
err1:
omap_dss_stop_device(dssdev);
@@ -145,7 +132,7 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
dss_sdi_disable();
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
regulator_disable(sdi.vdds_sdi_reg);
@@ -157,25 +144,24 @@ int sdi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("SDI init\n");
+ if (sdi.vdds_sdi_reg == NULL) {
+ struct regulator *vdds_sdi;
+
+ vdds_sdi = dss_get_vdds_sdi();
+
+ if (IS_ERR(vdds_sdi)) {
+ DSSERR("can't get VDDS_SDI regulator\n");
+ return PTR_ERR(vdds_sdi);
+ }
+
+ sdi.vdds_sdi_reg = vdds_sdi;
+ }
+
return 0;
}
-int sdi_init(bool skip_init)
+int sdi_init(void)
{
- /* we store this for first display enable, then clear it */
- sdi.skip_init = skip_init;
-
- sdi.vdds_sdi_reg = dss_get_vdds_sdi();
- if (IS_ERR(sdi.vdds_sdi_reg)) {
- DSSERR("can't get VDDS_SDI regulator\n");
- return PTR_ERR(sdi.vdds_sdi_reg);
- }
- /*
- * Enable clocks already here, otherwise there would be a toggle
- * of them until sdi_display_enable is called.
- */
- if (skip_init)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
return 0;
}
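sdi_init_display() above (and venc_init_display() below) defer the regulator lookup to the first display init and cache the handle. A minimal sketch of that lazy-lookup pattern, with a hypothetical supply name:

#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/device.h>

static struct regulator *foo_reg;

static int foo_init_display(struct device *dev)
{
	if (foo_reg == NULL) {
		/* first caller requests the supply; later calls reuse it */
		struct regulator *reg = regulator_get(dev, "vdda_foo");

		if (IS_ERR(reg))
			return PTR_ERR(reg);

		foo_reg = reg;
	}

	return 0;
}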
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index eff35050e28..8e35a5bae42 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -39,8 +39,6 @@
#include "dss.h"
-#define VENC_BASE 0x48050C00
-
/* Venc registers */
#define VENC_REV_ID 0x00
#define VENC_STATUS 0x04
@@ -289,6 +287,7 @@ const struct omap_video_timings omap_dss_ntsc_timings = {
EXPORT_SYMBOL(omap_dss_ntsc_timings);
static struct {
+ struct platform_device *pdev;
void __iomem *base;
struct mutex venc_lock;
u32 wss_data;
@@ -381,11 +380,11 @@ static void venc_reset(void)
static void venc_enable_clocks(int enable)
{
if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
- DSS_CLK_96M);
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK |
+ DSS_CLK_VIDFCK);
else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
- DSS_CLK_96M);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK |
+ DSS_CLK_VIDFCK);
}
static const struct venc_config *venc_timings_to_config(
@@ -641,50 +640,23 @@ static struct omap_dss_driver venc_driver = {
};
/* driver end */
-
-
-int venc_init(struct platform_device *pdev)
+int venc_init_display(struct omap_dss_device *dssdev)
{
- u8 rev_id;
+ DSSDBG("init_display\n");
- mutex_init(&venc.venc_lock);
+ if (venc.vdda_dac_reg == NULL) {
+ struct regulator *vdda_dac;
- venc.wss_data = 0;
+ vdda_dac = regulator_get(&venc.pdev->dev, "vdda_dac");
- venc.base = ioremap(VENC_BASE, SZ_1K);
- if (!venc.base) {
- DSSERR("can't ioremap VENC\n");
- return -ENOMEM;
- }
+ if (IS_ERR(vdda_dac)) {
+ DSSERR("can't get VDDA_DAC regulator\n");
+ return PTR_ERR(vdda_dac);
+ }
- venc.vdda_dac_reg = dss_get_vdda_dac();
- if (IS_ERR(venc.vdda_dac_reg)) {
- iounmap(venc.base);
- DSSERR("can't get VDDA_DAC regulator\n");
- return PTR_ERR(venc.vdda_dac_reg);
+ venc.vdda_dac_reg = vdda_dac;
}
- venc_enable_clocks(1);
-
- rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
- printk(KERN_INFO "OMAP VENC rev %d\n", rev_id);
-
- venc_enable_clocks(0);
-
- return omap_dss_register_driver(&venc_driver);
-}
-
-void venc_exit(void)
-{
- omap_dss_unregister_driver(&venc_driver);
-
- iounmap(venc.base);
-}
-
-int venc_init_display(struct omap_dss_device *dssdev)
-{
- DSSDBG("init_display\n");
-
return 0;
}
@@ -740,3 +712,73 @@ void venc_dump_regs(struct seq_file *s)
#undef DUMPREG
}
+
+/* VENC HW IP initialisation */
+static int omap_venchw_probe(struct platform_device *pdev)
+{
+ u8 rev_id;
+ struct resource *venc_mem;
+
+ venc.pdev = pdev;
+
+ mutex_init(&venc.venc_lock);
+
+ venc.wss_data = 0;
+
+ venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0);
+ if (!venc_mem) {
+ DSSERR("can't get IORESOURCE_MEM VENC\n");
+ return -EINVAL;
+ }
+ venc.base = ioremap(venc_mem->start, resource_size(venc_mem));
+ if (!venc.base) {
+ DSSERR("can't ioremap VENC\n");
+ return -ENOMEM;
+ }
+
+ venc_enable_clocks(1);
+
+ rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
+ dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id);
+
+ venc_enable_clocks(0);
+
+ return omap_dss_register_driver(&venc_driver);
+}
+
+static int omap_venchw_remove(struct platform_device *pdev)
+{
+ if (venc.vdda_dac_reg != NULL) {
+ regulator_put(venc.vdda_dac_reg);
+ venc.vdda_dac_reg = NULL;
+ }
+ omap_dss_unregister_driver(&venc_driver);
+
+ iounmap(venc.base);
+ return 0;
+}
+
+static struct platform_driver omap_venchw_driver = {
+ .probe = omap_venchw_probe,
+ .remove = omap_venchw_remove,
+ .driver = {
+ .name = "omapdss_venc",
+ .owner = THIS_MODULE,
+ },
+};
+
+int venc_init_platform_driver(void)
+{
+ if (cpu_is_omap44xx())
+ return 0;
+
+ return platform_driver_register(&omap_venchw_driver);
+}
+
+void venc_uninit_platform_driver(void)
+{
+ if (cpu_is_omap44xx())
+ return;
+
+ platform_driver_unregister(&omap_venchw_driver);
+}
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
index 65149b22cf3..231f568b9f9 100644
--- a/drivers/video/omap2/omapfb/Kconfig
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -1,6 +1,6 @@
menuconfig FB_OMAP2
- tristate "OMAP2/3 frame buffer support (EXPERIMENTAL)"
- depends on FB && OMAP2_DSS
+ tristate "OMAP2+ frame buffer support (EXPERIMENTAL)"
+ depends on FB && OMAP2_DSS && !DRM_OMAP
select OMAP2_VRAM
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
@@ -8,10 +8,10 @@ menuconfig FB_OMAP2
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
help
- Frame buffer driver for OMAP2/3 based boards.
+ Frame buffer driver for OMAP2+ based boards.
config FB_OMAP2_DEBUG_SUPPORT
- bool "Debug support for OMAP2/3 FB"
+ bool "Debug support for OMAP2+ FB"
default y
depends on FB_OMAP2
help
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 4fdab8e9c49..505ec667204 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -2090,7 +2090,7 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
{
int r;
u8 bpp;
- struct omap_video_timings timings;
+ struct omap_video_timings timings, temp_timings;
r = omapfb_mode_to_timings(mode_str, &timings, &bpp);
if (r)
@@ -2100,14 +2100,23 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
fbdev->bpp_overrides[fbdev->num_bpp_overrides].bpp = bpp;
++fbdev->num_bpp_overrides;
- if (!display->driver->check_timings || !display->driver->set_timings)
- return -EINVAL;
+ if (display->driver->check_timings) {
+ r = display->driver->check_timings(display, &timings);
+ if (r)
+ return r;
+ } else {
+ /* If check_timings is not present, compare x_res and y_res */
+ if (display->driver->get_timings) {
+ display->driver->get_timings(display, &temp_timings);
- r = display->driver->check_timings(display, &timings);
- if (r)
- return r;
+ if (temp_timings.x_res != timings.x_res ||
+ temp_timings.y_res != timings.y_res)
+ return -EINVAL;
+ }
+ }
- display->driver->set_timings(display, &timings);
+ if (display->driver->set_timings)
+ display->driver->set_timings(display, &timings);
return 0;
}
diff --git a/drivers/video/sysfillrect.c b/drivers/video/sysfillrect.c
index 33ee3d34f9d..92fe0be25f6 100644
--- a/drivers/video/sysfillrect.c
+++ b/drivers/video/sysfillrect.c
@@ -256,7 +256,7 @@ void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
p->fix.visual == FB_VISUAL_DIRECTCOLOR )
fg = ((u32 *) (p->pseudo_palette))[rect->color];
else
- fg = rect->color;
+ fg = solid_color(p, rect->color);
pat = pixel_to_pat( bpp, fg);
diff --git a/drivers/video/sysimgblt.c b/drivers/video/sysimgblt.c
index 186c6f607be..bdc45dca24e 100644
--- a/drivers/video/sysimgblt.c
+++ b/drivers/video/sysimgblt.c
@@ -14,6 +14,7 @@
#include <linux/string.h>
#include <linux/fb.h>
#include <asm/types.h>
+#include "fb_draw.h"
#define DEBUG
@@ -80,6 +81,7 @@ static void color_imageblit(const struct fb_image *image, struct fb_info *p,
else
color = *src;
color <<= FB_LEFT_POS(p, bpp);
+ color = solid_color(p, color);
val |= FB_SHIFT_HIGH(p, color, shift);
if (shift >= null_bits) {
*dst++ = val;
@@ -265,8 +267,8 @@ void sys_imageblit(struct fb_info *p, const struct fb_image *image)
fgcolor = ((u32*)(p->pseudo_palette))[image->fg_color];
bgcolor = ((u32*)(p->pseudo_palette))[image->bg_color];
} else {
- fgcolor = image->fg_color;
- bgcolor = image->bg_color;
+ fgcolor = solid_color(p, image->fg_color);
+ bgcolor = solid_color(p, image->bg_color);
}
if (32 % bpp == 0 && !start_index && !pitch_index &&
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index dfef88c803d..9710bf8caea 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -250,8 +250,7 @@ static irqreturn_t tmiofb_irq(int irq, void *__info)
*/
static int tmiofb_hw_stop(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ struct tmio_fb_data *data = mfd_get_data(dev);
struct fb_info *info = platform_get_drvdata(dev);
struct tmiofb_par *par = info->par;
@@ -268,7 +267,7 @@ static int tmiofb_hw_stop(struct platform_device *dev)
*/
static int tmiofb_hw_init(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct fb_info *info = platform_get_drvdata(dev);
struct tmiofb_par *par = info->par;
const struct resource *nlcr = &cell->resources[0];
@@ -312,8 +311,7 @@ static int tmiofb_hw_init(struct platform_device *dev)
*/
static void tmiofb_hw_mode(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ struct tmio_fb_data *data = mfd_get_data(dev);
struct fb_info *info = platform_get_drvdata(dev);
struct fb_videomode *mode = info->mode;
struct tmiofb_par *par = info->par;
@@ -559,9 +557,8 @@ static int tmiofb_ioctl(struct fb_info *fbi,
static struct fb_videomode *
tmiofb_find_mode(struct fb_info *info, struct fb_var_screeninfo *var)
{
- struct mfd_cell *cell =
- info->device->platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ struct tmio_fb_data *data =
+ mfd_get_data(to_platform_device(info->device));
struct fb_videomode *best = NULL;
int i;
@@ -581,9 +578,8 @@ static int tmiofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct fb_videomode *mode;
- struct mfd_cell *cell =
- info->device->platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ struct tmio_fb_data *data =
+ mfd_get_data(to_platform_device(info->device));
mode = tmiofb_find_mode(info, var);
if (!mode || var->bits_per_pixel > 16)
@@ -683,8 +679,8 @@ static struct fb_ops tmiofb_ops = {
static int __devinit tmiofb_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
- struct tmio_fb_data *data = cell->driver_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
+ struct tmio_fb_data *data = mfd_get_data(dev);
struct resource *ccr = platform_get_resource(dev, IORESOURCE_MEM, 1);
struct resource *lcr = platform_get_resource(dev, IORESOURCE_MEM, 0);
struct resource *vram = platform_get_resource(dev, IORESOURCE_MEM, 2);
@@ -811,7 +807,7 @@ err_ioremap_ccr:
static int __devexit tmiofb_remove(struct platform_device *dev)
{
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
struct fb_info *info = platform_get_drvdata(dev);
int irq = platform_get_irq(dev, 0);
struct tmiofb_par *par;
@@ -941,7 +937,7 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
#ifdef CONFIG_FB_TMIO_ACCELL
struct tmiofb_par *par = info->par;
#endif
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
int retval = 0;
console_lock();
@@ -973,7 +969,7 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
static int tmiofb_resume(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
- struct mfd_cell *cell = dev->dev.platform_data;
+ const struct mfd_cell *cell = mfd_get_cell(dev);
int retval = 0;
console_lock();
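The tmiofb hunks above stop digging the MFD cell out of dev.platform_data and use the mfd-core accessors instead; the cell is now const. A minimal sketch of a client probe under that convention, with a hypothetical foo_pdata type:

#include <linux/platform_device.h>
#include <linux/mfd/core.h>

struct foo_pdata {
	int bus_width;		/* hypothetical driver-specific data */
};

static int foo_probe(struct platform_device *pdev)
{
	const struct mfd_cell *cell = mfd_get_cell(pdev);
	struct foo_pdata *pdata = mfd_get_data(pdev);

	if (!pdata)
		return -ENODEV;

	/* cell callbacks are still available through the const pointer */
	if (cell->enable)
		cell->enable(pdev);

	return 0;
}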
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 80b3b123dd7..7c608c5ccf8 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -60,7 +60,7 @@ config W1_MASTER_GPIO
config HDQ_MASTER_OMAP
tristate "OMAP HDQ driver"
- depends on ARCH_OMAP2430 || ARCH_OMAP3
+ depends on SOC_OMAP2430 || ARCH_OMAP3
help
Say Y here if you want support for the 1-wire or HDQ Interface
on an OMAP processor.
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 6b85e7fefa4..95921b77cf8 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -90,7 +90,7 @@ struct ds1wm_data {
void __iomem *map;
int bus_shift; /* # of shifts to calc register offsets */
struct platform_device *pdev;
- struct mfd_cell *cell;
+ const struct mfd_cell *cell;
int irq;
int active_high;
int slave_present;
@@ -216,7 +216,7 @@ static int ds1wm_find_divisor(int gclk)
static void ds1wm_up(struct ds1wm_data *ds1wm_data)
{
int divisor;
- struct ds1wm_driver_data *plat = ds1wm_data->cell->driver_data;
+ struct ds1wm_driver_data *plat = mfd_get_data(ds1wm_data->pdev);
if (ds1wm_data->cell->enable)
ds1wm_data->cell->enable(ds1wm_data->pdev);
@@ -330,16 +330,11 @@ static int ds1wm_probe(struct platform_device *pdev)
struct ds1wm_data *ds1wm_data;
struct ds1wm_driver_data *plat;
struct resource *res;
- struct mfd_cell *cell;
int ret;
if (!pdev)
return -ENODEV;
- cell = pdev->dev.platform_data;
- if (!cell)
- return -ENODEV;
-
ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL);
if (!ds1wm_data)
return -ENOMEM;
@@ -356,13 +351,13 @@ static int ds1wm_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err0;
}
- plat = cell->driver_data;
+ plat = mfd_get_data(pdev);
/* calculate bus shift from mem resource */
ds1wm_data->bus_shift = resource_size(res) >> 3;
ds1wm_data->pdev = pdev;
- ds1wm_data->cell = cell;
+ ds1wm_data->cell = mfd_get_cell(pdev);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 3dd4971160e..2b4acb86c19 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -124,6 +124,8 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
u32 pre_margin = GET_WLDR_VAL(timer_margin);
void __iomem *base = wdev->base;
+ pm_runtime_get_sync(wdev->dev);
+
/* just count up at 32 KHz */
while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04)
cpu_relax();
@@ -131,6 +133,8 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
__raw_writel(pre_margin, base + OMAP_WATCHDOG_LDR);
while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04)
cpu_relax();
+
+ pm_runtime_put_sync(wdev->dev);
}
/*
@@ -160,6 +164,8 @@ static int omap_wdt_open(struct inode *inode, struct file *file)
omap_wdt_ping(wdev); /* trigger loading of new timeout value */
omap_wdt_enable(wdev);
+ pm_runtime_put_sync(wdev->dev);
+
return nonseekable_open(inode, file);
}
@@ -171,6 +177,7 @@ static int omap_wdt_release(struct inode *inode, struct file *file)
* Shut off the timer unless NOWAYOUT is defined.
*/
#ifndef CONFIG_WATCHDOG_NOWAYOUT
+ pm_runtime_get_sync(wdev->dev);
omap_wdt_disable(wdev);
@@ -190,9 +197,11 @@ static ssize_t omap_wdt_write(struct file *file, const char __user *data,
/* Refresh LOAD_TIME. */
if (len) {
+ pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
omap_wdt_ping(wdev);
spin_unlock(&wdt_lock);
+ pm_runtime_put_sync(wdev->dev);
}
return len;
}
@@ -224,15 +233,18 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
return put_user(omap_prcm_get_reset_sources(),
(int __user *)arg);
case WDIOC_KEEPALIVE:
+ pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
omap_wdt_ping(wdev);
spin_unlock(&wdt_lock);
+ pm_runtime_put_sync(wdev->dev);
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_margin, (int __user *)arg))
return -EFAULT;
omap_wdt_adjust_timeout(new_margin);
+ pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
omap_wdt_disable(wdev);
omap_wdt_set_timeout(wdev);
@@ -240,6 +252,7 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
omap_wdt_ping(wdev);
spin_unlock(&wdt_lock);
+ pm_runtime_put_sync(wdev->dev);
/* Fall */
case WDIOC_GETTIMEOUT:
return put_user(timer_margin, (int __user *)arg);
@@ -345,8 +358,11 @@ static void omap_wdt_shutdown(struct platform_device *pdev)
{
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
- if (wdev->omap_wdt_users)
+ if (wdev->omap_wdt_users) {
+ pm_runtime_get_sync(wdev->dev);
omap_wdt_disable(wdev);
+ pm_runtime_put_sync(wdev->dev);
+ }
}
static int __devexit omap_wdt_remove(struct platform_device *pdev)
@@ -381,8 +397,11 @@ static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state)
{
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
- if (wdev->omap_wdt_users)
+ if (wdev->omap_wdt_users) {
+ pm_runtime_get_sync(wdev->dev);
omap_wdt_disable(wdev);
+ pm_runtime_put_sync(wdev->dev);
+ }
return 0;
}
@@ -392,8 +411,10 @@ static int omap_wdt_resume(struct platform_device *pdev)
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
if (wdev->omap_wdt_users) {
+ pm_runtime_get_sync(wdev->dev);
omap_wdt_enable(wdev);
omap_wdt_ping(wdev);
+ pm_runtime_put_sync(wdev->dev);
}
return 0;
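The omap_wdt hunks above bracket every register access with a runtime PM reference so the watchdog IP is clocked while it is touched and may idle afterwards. A minimal sketch of that bracketing, assuming a device already enabled for runtime PM in probe:

#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>

static void foo_kick_hw(struct device *dev, void __iomem *base)
{
	/* take a reference: the IP is powered and clocked from here on */
	pm_runtime_get_sync(dev);

	__raw_writel(0xAAAA, base);	/* hypothetical trigger register */

	/* drop the reference so the IP may idle again */
	pm_runtime_put_sync(dev);
}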
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 3939e53f5f9..d8e725082fd 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -37,6 +37,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/mfd/rdc321x.h>
+#include <linux/mfd/core.h>
#define RDC_WDT_MASK 0x80000000 /* Mask */
#define RDC_WDT_EN 0x00800000 /* Enable bit */
@@ -231,7 +232,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
struct resource *r;
struct rdc321x_wdt_pdata *pdata;
- pdata = platform_get_drvdata(pdev);
+ pdata = mfd_get_data(pdev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -ENODEV;
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 9127eda2145..0a0efe713bc 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -278,7 +278,7 @@ static struct miscdevice sp805_wdt_miscdev = {
};
static int __devinit
-sp805_wdt_probe(struct amba_device *adev, struct amba_id *id)
+sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;