Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig  2
-rw-r--r--  drivers/Makefile  1
-rw-r--r--  drivers/base/power/main.c  66
-rw-r--r--  drivers/char/Kconfig  17
-rw-r--r--  drivers/char/Makefile  1
-rw-r--r--  drivers/char/dcc_tty.c  326
-rw-r--r--  drivers/char/mem.c  17
-rw-r--r--  drivers/cpufreq/Kconfig  28
-rw-r--r--  drivers/cpufreq/Makefile  1
-rw-r--r--  drivers/cpufreq/cpufreq.c  48
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c  44
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h  1
-rw-r--r--  drivers/cpufreq/cpufreq_interactive.c  1341
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c  25
-rw-r--r--  drivers/cpuidle/governors/menu.c  7
-rw-r--r--  drivers/gpio/gpiolib.c  10
-rw-r--r--  drivers/hid/hid-input.c  10
-rw-r--r--  drivers/hid/hid-multitouch.c  24
-rw-r--r--  drivers/iio/industrialio-event.c  12
-rw-r--r--  drivers/input/Kconfig  9
-rw-r--r--  drivers/input/Makefile  1
-rw-r--r--  drivers/input/evdev.c  53
-rw-r--r--  drivers/input/keyreset.c  239
-rw-r--r--  drivers/input/misc/Kconfig  16
-rw-r--r--  drivers/input/misc/Makefile  2
-rw-r--r--  drivers/input/misc/gpio_axis.c  192
-rw-r--r--  drivers/input/misc/gpio_event.c  228
-rw-r--r--  drivers/input/misc/gpio_input.c  390
-rw-r--r--  drivers/input/misc/gpio_matrix.c  441
-rw-r--r--  drivers/input/misc/gpio_output.c  97
-rw-r--r--  drivers/input/misc/keychord.c  391
-rw-r--r--  drivers/misc/Kconfig  4
-rw-r--r--  drivers/misc/Makefile  1
-rw-r--r--  drivers/misc/uid_stat.c  152
-rw-r--r--  drivers/mmc/card/Kconfig  9
-rw-r--r--  drivers/mmc/card/block.c  36
-rw-r--r--  drivers/mmc/core/Kconfig  15
-rw-r--r--  drivers/mmc/core/core.c  110
-rw-r--r--  drivers/mmc/core/host.c  10
-rw-r--r--  drivers/mmc/core/sd.c  83
-rw-r--r--  drivers/mmc/core/sdio.c  111
-rw-r--r--  drivers/mmc/core/sdio_bus.c  13
-rwxr-xr-x [-rw-r--r--]  drivers/mmc/core/sdio_io.c  33
-rw-r--r--  drivers/mtd/nand/Kconfig  10
-rw-r--r--  drivers/net/ppp/Kconfig  17
-rw-r--r--  drivers/net/ppp/Makefile  2
-rw-r--r--  drivers/net/ppp/pppolac.c  449
-rw-r--r--  drivers/net/ppp/pppopns.c  428
-rw-r--r--  drivers/net/tun.c  6
-rw-r--r--  drivers/net/wireless/Kconfig  5
-rw-r--r--  drivers/power/power_supply_core.c  31
-rw-r--r--  drivers/power/power_supply_sysfs.c  4
-rw-r--r--  drivers/staging/android/Kconfig  11
-rw-r--r--  drivers/staging/android/Makefile  2
-rw-r--r--  drivers/staging/android/TODO  10
-rw-r--r--  drivers/staging/android/android_alarm.h  44
-rw-r--r--  drivers/staging/android/ashmem.c  65
-rw-r--r--  drivers/staging/android/ashmem.h  30
-rw-r--r--  drivers/staging/android/binder.c  321
-rw-r--r--  drivers/staging/android/binder.h  80
-rw-r--r--  drivers/staging/android/binder_trace.h  8
-rw-r--r--  drivers/staging/android/ion/Kconfig  35
-rw-r--r--  drivers/staging/android/ion/Makefile  10
-rw-r--r--  drivers/staging/android/ion/compat_ion.c  177
-rw-r--r--  drivers/staging/android/ion/compat_ion.h  30
-rw-r--r--  drivers/staging/android/ion/ion.c  1634
-rw-r--r--  drivers/staging/android/ion/ion.h  204
-rw-r--r--  drivers/staging/android/ion/ion_carveout_heap.c  194
-rw-r--r--  drivers/staging/android/ion/ion_chunk_heap.c  195
-rw-r--r--  drivers/staging/android/ion/ion_cma_heap.c  218
-rw-r--r--  drivers/staging/android/ion/ion_dummy_driver.c  158
-rw-r--r--  drivers/staging/android/ion/ion_heap.c  369
-rw-r--r--  drivers/staging/android/ion/ion_page_pool.c  190
-rw-r--r--  drivers/staging/android/ion/ion_priv.h  405
-rw-r--r--  drivers/staging/android/ion/ion_system_heap.c  446
-rw-r--r--  drivers/staging/android/ion/ion_test.c  282
-rw-r--r--  drivers/staging/android/ion/tegra/Makefile  1
-rw-r--r--  drivers/staging/android/ion/tegra/tegra_ion.c  84
-rw-r--r--  drivers/staging/android/lowmemorykiller.c  110
-rw-r--r--  drivers/staging/android/sw_sync.h  37
-rw-r--r--  drivers/staging/android/sync.c  14
-rw-r--r--  drivers/staging/android/sync.h  86
-rw-r--r--  drivers/staging/android/uapi/android_alarm.h  62
-rw-r--r--  drivers/staging/android/uapi/ashmem.h  47
-rw-r--r--  drivers/staging/android/uapi/binder.h  330
-rw-r--r--  drivers/staging/android/uapi/ion.h  196
-rw-r--r--  drivers/staging/android/uapi/ion_test.h  70
-rw-r--r--  drivers/staging/android/uapi/sw_sync.h  32
-rw-r--r--  drivers/staging/android/uapi/sync.h  97
-rw-r--r--  drivers/switch/Kconfig  15
-rw-r--r--  drivers/switch/Makefile  4
-rw-r--r--  drivers/switch/switch_class.c  174
-rw-r--r--  drivers/switch/switch_gpio.c  172
-rw-r--r--  drivers/tty/serial/serial_core.c  3
-rw-r--r--  drivers/usb/gadget/Kconfig  18
-rw-r--r--  drivers/usb/gadget/Makefile  2
-rw-r--r--  drivers/usb/gadget/android.c  1519
-rw-r--r--  drivers/usb/gadget/composite.c  10
-rw-r--r--  drivers/usb/gadget/f_accessory.c  1180
-rw-r--r--  drivers/usb/gadget/f_audio_source.c  828
-rw-r--r--  drivers/usb/gadget/f_fs.c  7
-rw-r--r--  drivers/usb/gadget/f_mtp.c  1285
-rw-r--r--  drivers/usb/gadget/f_rndis.c  10
-rw-r--r--  drivers/usb/gadget/rndis.c  11
-rw-r--r--  drivers/usb/gadget/u_serial.c  1
-rw-r--r--  drivers/usb/gadget/udc-core.c  10
-rw-r--r--  drivers/usb/phy/Kconfig  8
-rw-r--r--  drivers/usb/phy/Makefile  1
-rw-r--r--  drivers/usb/phy/otg-wakelock.c  173
-rw-r--r--  drivers/video/Kconfig  1
-rw-r--r--  drivers/video/Makefile  1
-rw-r--r--  drivers/video/adf/Kconfig  14
-rw-r--r--  drivers/video/adf/Makefile  15
-rw-r--r--  drivers/video/adf/adf.c  1166
-rw-r--r--  drivers/video/adf/adf.h  71
-rw-r--r--  drivers/video/adf/adf_client.c  810
-rw-r--r--  drivers/video/adf/adf_fbdev.c  651
-rw-r--r--  drivers/video/adf/adf_fops.c  957
-rw-r--r--  drivers/video/adf/adf_fops.h  37
-rw-r--r--  drivers/video/adf/adf_fops32.c  217
-rw-r--r--  drivers/video/adf/adf_fops32.h  78
-rw-r--r--  drivers/video/adf/adf_format.c  280
-rw-r--r--  drivers/video/adf/adf_memblock.c  149
-rw-r--r--  drivers/video/adf/adf_sysfs.c  296
-rw-r--r--  drivers/video/adf/adf_sysfs.h  33
-rw-r--r--  drivers/video/adf/adf_trace.h  93
-rw-r--r--  drivers/w1/masters/ds2482.c  47
127 files changed, 21673 insertions(+), 527 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index d27feb5460f..ba3c789ad9b 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -100,6 +100,8 @@ source "drivers/memstick/Kconfig"
source "drivers/leds/Kconfig"
+source "drivers/switch/Kconfig"
+
source "drivers/accessibility/Kconfig"
source "drivers/infiniband/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 092a62e7968..1c92047b81b 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -111,6 +111,7 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-y += leds/
+obj-$(CONFIG_SWITCH) += switch/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5a9b6569dd7..6a33dd85c04 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -29,6 +29,8 @@
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
+#include <linux/timer.h>
+
#include "../base.h"
#include "power.h"
@@ -54,6 +56,12 @@ struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
+struct dpm_watchdog {
+ struct device *dev;
+ struct task_struct *tsk;
+ struct timer_list timer;
+};
+
static int async_error;
/**
@@ -384,6 +392,56 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
return error;
}
+/**
+ * dpm_wd_handler - Driver suspend / resume watchdog handler.
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover, so BUG() out for
+ * a crash dump.
+ */
+static void dpm_wd_handler(unsigned long data)
+{
+ struct dpm_watchdog *wd = (void *)data;
+ struct device *dev = wd->dev;
+ struct task_struct *tsk = wd->tsk;
+
+ dev_emerg(dev, "**** DPM device timeout ****\n");
+ show_stack(tsk, NULL);
+
+ BUG();
+}
+
+/**
+ * dpm_wd_set - Enable pm watchdog for given device.
+ * @wd: Watchdog. Must be allocated on the stack.
+ * @dev: Device to handle.
+ */
+static void dpm_wd_set(struct dpm_watchdog *wd, struct device *dev)
+{
+ struct timer_list *timer = &wd->timer;
+
+ wd->dev = dev;
+ wd->tsk = get_current();
+
+ init_timer_on_stack(timer);
+ timer->expires = jiffies + HZ * 12;
+ timer->function = dpm_wd_handler;
+ timer->data = (unsigned long)wd;
+ add_timer(timer);
+}
+
+/**
+ * dpm_wd_clear - Disable pm watchdog.
+ * @wd: Watchdog to disable.
+ */
+static void dpm_wd_clear(struct dpm_watchdog *wd)
+{
+ struct timer_list *timer = &wd->timer;
+
+ del_timer_sync(timer);
+ destroy_timer_on_stack(timer);
+}
+
/*------------------------- Resume routines -------------------------*/
/**
@@ -570,6 +628,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
char *info = NULL;
int error = 0;
+ struct dpm_watchdog wd;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
@@ -585,6 +644,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
* a resumed device, even if the device hasn't been completed yet.
*/
dev->power.is_prepared = false;
+ dpm_wd_set(&wd, dev);
if (!dev->power.is_suspended)
goto Unlock;
@@ -636,6 +696,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
Unlock:
device_unlock(dev);
+ dpm_wd_clear(&wd);
Complete:
complete_all(&dev->power.completion);
@@ -1053,6 +1114,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
char *info = NULL;
int error = 0;
+ struct dpm_watchdog wd;
dpm_wait_for_children(dev, async);
@@ -1075,6 +1137,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+
+ dpm_wd_set(&wd, dev);
device_lock(dev);
@@ -1131,6 +1195,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
device_unlock(dev);
+ dpm_wd_clear(&wd);
+
Complete:
complete_all(&dev->power.completion);
if (error)
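
The watchdog above follows a common kernel pattern: a timer allocated on the caller's stack, armed before a potentially stuck operation and disarmed after it. A minimal editorial sketch of the same pattern, assuming the pre-timer_setup() timer API this patch uses (the handler receives an unsigned long); all names here are illustrative, not part of the patch:

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>

struct demo_watchdog {
	struct task_struct *tsk;	/* task to blame on timeout */
	struct timer_list timer;
};

static void demo_wd_fire(unsigned long data)
{
	struct demo_watchdog *wd = (void *)data;

	/* Dump the stuck task's stack; a real handler might BUG() here. */
	show_stack(wd->tsk, NULL);
}

static void demo_guarded_operation(void)
{
	struct demo_watchdog wd;	/* lives in this stack frame */

	wd.tsk = current;
	init_timer_on_stack(&wd.timer);
	wd.timer.expires = jiffies + 12 * HZ;	/* same 12 s budget as above */
	wd.timer.function = demo_wd_fire;
	wd.timer.data = (unsigned long)&wd;
	add_timer(&wd.timer);

	/* ... the operation being watched runs here ... */

	del_timer_sync(&wd.timer);
	destroy_timer_on_stack(&wd.timer);
}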
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3bb6fa3930b..6fcb9b01e57 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -6,6 +6,19 @@ menu "Character devices"
source "drivers/tty/Kconfig"
+config DEVMEM
+ bool "Memory device driver"
+ default y
+ help
+ The memory driver provides two character devices, mem and kmem, which
+ provide access to the system's memory. The mem device is a view of
+ physical memory, and each byte in the device corresponds to the
+ matching physical address. The kmem device is the same as mem, but
+ the addresses correspond to the kernel's virtual address space rather
+ than physical memory. These devices are standard parts of a Linux
+ system and most users should say Y here. You might say N if you are
+ very security conscious or memory is tight.
+
config DEVKMEM
bool "/dev/kmem virtual device support"
default y
@@ -584,6 +597,10 @@ config DEVPORT
depends on ISA || PCI
default y
+config DCC_TTY
+ tristate "DCC tty driver"
+ depends on ARM
+
source "drivers/s390/char/Kconfig"
config MSM_SMD_PKT
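
For context on the new DEVMEM option: /dev/mem gives privileged userspace a window onto physical memory. A minimal sketch of reading one word through it; the physical address is a placeholder, opening requires CAP_SYS_RAWIO, and CONFIG_STRICT_DEVMEM may still reject the range:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const off_t phys = 0x10000000;	/* placeholder physical address */
	long page = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/mem");	/* needs CAP_SYS_RAWIO */
		return 1;
	}
	/* mmap offsets must be page aligned; map the enclosing page. */
	void *base = mmap(NULL, page, PROT_READ, MAP_SHARED, fd,
			  phys & ~(off_t)(page - 1));
	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	uint32_t val = *(volatile uint32_t *)
			((char *)base + (phys & (page - 1)));
	printf("0x%llx = 0x%08x\n", (unsigned long long)phys, val);
	munmap(base, page);
	close(fd);
	return 0;
}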
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7ff1d0d208a..e0047ed1e74 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_PCMCIA) += pcmcia/
obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
obj-$(CONFIG_TCG_TPM) += tpm/
+obj-$(CONFIG_DCC_TTY) += dcc_tty.o
obj-$(CONFIG_PS3_FLASH) += ps3flash.o
obj-$(CONFIG_JS_RTC) += js-rtc.o
diff --git a/drivers/char/dcc_tty.c b/drivers/char/dcc_tty.c
new file mode 100644
index 00000000000..0a62d410286
--- /dev/null
+++ b/drivers/char/dcc_tty.c
@@ -0,0 +1,326 @@
+/* drivers/char/dcc_tty.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/hrtimer.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+MODULE_DESCRIPTION("DCC TTY Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+DEFINE_SPINLOCK(g_dcc_tty_lock);
+static struct hrtimer g_dcc_timer;
+static char g_dcc_buffer[16];
+static int g_dcc_buffer_head;
+static int g_dcc_buffer_count;
+static unsigned g_dcc_write_delay_usecs = 1;
+static struct tty_driver *g_dcc_tty_driver;
+static struct tty_struct *g_dcc_tty;
+static int g_dcc_tty_open_count;
+
+static void dcc_poll_locked(void)
+{
+ char ch;
+ int rch;
+ int written;
+
+ while (g_dcc_buffer_count) {
+ ch = g_dcc_buffer[g_dcc_buffer_head];
+ asm(
+ "mrc 14, 0, r15, c0, c1, 0\n"
+ "mcrcc 14, 0, %1, c0, c5, 0\n"
+ "movcc %0, #1\n"
+ "movcs %0, #0\n"
+ : "=r" (written)
+ : "r" (ch)
+ );
+ if (written) {
+ if (ch == '\n')
+ g_dcc_buffer[g_dcc_buffer_head] = '\r';
+ else {
+ g_dcc_buffer_head = (g_dcc_buffer_head + 1) % ARRAY_SIZE(g_dcc_buffer);
+ g_dcc_buffer_count--;
+ if (g_dcc_tty)
+ tty_wakeup(g_dcc_tty);
+ }
+ g_dcc_write_delay_usecs = 1;
+ } else {
+ if (g_dcc_write_delay_usecs > 0x100)
+ break;
+ g_dcc_write_delay_usecs <<= 1;
+ udelay(g_dcc_write_delay_usecs);
+ }
+ }
+
+ if (g_dcc_tty && !test_bit(TTY_THROTTLED, &g_dcc_tty->flags)) {
+ asm(
+ "mrc 14, 0, %0, c0, c1, 0\n"
+ "tst %0, #(1 << 30)\n"
+ "moveq %0, #-1\n"
+ "mrcne 14, 0, %0, c0, c5, 0\n"
+ : "=r" (rch)
+ );
+ if (rch >= 0) {
+ ch = rch;
+ tty_insert_flip_string(g_dcc_tty->port, &ch, 1);
+ tty_flip_buffer_push(g_dcc_tty->port);
+ }
+ }
+
+ if (g_dcc_buffer_count)
+ hrtimer_start(&g_dcc_timer, ktime_set(0, g_dcc_write_delay_usecs * NSEC_PER_USEC), HRTIMER_MODE_REL);
+ else
+ hrtimer_start(&g_dcc_timer, ktime_set(0, 20 * NSEC_PER_MSEC), HRTIMER_MODE_REL);
+}
+
+static int dcc_tty_open(struct tty_struct * tty, struct file * filp)
+{
+ int ret;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ if (g_dcc_tty == NULL || g_dcc_tty == tty) {
+ g_dcc_tty = tty;
+ g_dcc_tty_open_count++;
+ ret = 0;
+ } else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+
+ printk("dcc_tty_open, tty %p, f_flags %x, returned %d\n", tty, filp->f_flags, ret);
+
+ return ret;
+}
+
+static void dcc_tty_close(struct tty_struct * tty, struct file * filp)
+{
+ printk("dcc_tty_close, tty %p, f_flags %x\n", tty, filp->f_flags);
+ if (g_dcc_tty == tty) {
+ if (--g_dcc_tty_open_count == 0)
+ g_dcc_tty = NULL;
+ }
+}
+
+static int dcc_write(const unsigned char *buf_start, int count)
+{
+ const unsigned char *buf = buf_start;
+ unsigned long irq_flags;
+ int copy_len;
+ int space_left;
+ int tail;
+
+ if (count < 1)
+ return 0;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ do {
+ tail = (g_dcc_buffer_head + g_dcc_buffer_count) % ARRAY_SIZE(g_dcc_buffer);
+ copy_len = ARRAY_SIZE(g_dcc_buffer) - tail;
+ space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+ if (copy_len > space_left)
+ copy_len = space_left;
+ if (copy_len > count)
+ copy_len = count;
+ memcpy(&g_dcc_buffer[tail], buf, copy_len);
+ g_dcc_buffer_count += copy_len;
+ buf += copy_len;
+ count -= copy_len;
+ if (copy_len < count && copy_len < space_left) {
+ space_left -= copy_len;
+ copy_len = count;
+ if (copy_len > space_left) {
+ copy_len = space_left;
+ }
+ memcpy(g_dcc_buffer, buf, copy_len);
+ buf += copy_len;
+ count -= copy_len;
+ g_dcc_buffer_count += copy_len;
+ }
+ dcc_poll_locked();
+ space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+ } while(count && space_left);
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+ return buf - buf_start;
+}
+
+static int dcc_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
+{
+ int ret;
+ /* printk("dcc_tty_write %p, %d\n", buf, count); */
+ ret = dcc_write(buf, count);
+ if (ret != count)
+ printk("dcc_tty_write %p, %d, returned %d\n", buf, count, ret);
+ return ret;
+}
+
+static int dcc_tty_write_room(struct tty_struct *tty)
+{
+ int space_left;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+ return space_left;
+}
+
+static int dcc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ int ret;
+ asm(
+ "mrc 14, 0, %0, c0, c1, 0\n"
+ "mov %0, %0, LSR #30\n"
+ "and %0, %0, #1\n"
+ : "=r" (ret)
+ );
+ return ret;
+}
+
+static void dcc_tty_unthrottle(struct tty_struct * tty)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ dcc_poll_locked();
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+}
+
+static enum hrtimer_restart dcc_tty_timer_func(struct hrtimer *timer)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ dcc_poll_locked();
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+ return HRTIMER_NORESTART;
+}
+
+void dcc_console_write(struct console *co, const char *b, unsigned count)
+{
+#if 1
+ dcc_write(b, count);
+#else
+ /* blocking printk */
+ while (count > 0) {
+ int written;
+ written = dcc_write(b, count);
+ if (written) {
+ b += written;
+ count -= written;
+ }
+ }
+#endif
+}
+
+static struct tty_driver *dcc_console_device(struct console *c, int *index)
+{
+ *index = 0;
+ return g_dcc_tty_driver;
+}
+
+static int __init dcc_console_setup(struct console *co, char *options)
+{
+ if (co->index != 0)
+ return -ENODEV;
+ return 0;
+}
+
+static struct console dcc_console = {
+ .name = "ttyDCC",
+ .write = dcc_console_write,
+ .device = dcc_console_device,
+ .setup = dcc_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static struct tty_operations dcc_tty_ops = {
+ .open = dcc_tty_open,
+ .close = dcc_tty_close,
+ .write = dcc_tty_write,
+ .write_room = dcc_tty_write_room,
+ .chars_in_buffer = dcc_tty_chars_in_buffer,
+ .unthrottle = dcc_tty_unthrottle,
+};
+
+static int __init dcc_tty_init(void)
+{
+ int ret;
+
+ hrtimer_init(&g_dcc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ g_dcc_timer.function = dcc_tty_timer_func;
+
+ g_dcc_tty_driver = alloc_tty_driver(1);
+ if (!g_dcc_tty_driver) {
+ printk(KERN_ERR "dcc_tty_probe: alloc_tty_driver failed\n");
+ ret = -ENOMEM;
+ goto err_alloc_tty_driver_failed;
+ }
+ g_dcc_tty_driver->owner = THIS_MODULE;
+ g_dcc_tty_driver->driver_name = "dcc";
+ g_dcc_tty_driver->name = "ttyDCC";
+ g_dcc_tty_driver->major = 0; // auto assign
+ g_dcc_tty_driver->minor_start = 0;
+ g_dcc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ g_dcc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ g_dcc_tty_driver->init_termios = tty_std_termios;
+ g_dcc_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(g_dcc_tty_driver, &dcc_tty_ops);
+ ret = tty_register_driver(g_dcc_tty_driver);
+ if (ret) {
+ printk(KERN_ERR "dcc_tty_probe: tty_register_driver failed, %d\n", ret);
+ goto err_tty_register_driver_failed;
+ }
+ tty_register_device(g_dcc_tty_driver, 0, NULL);
+
+ register_console(&dcc_console);
+ hrtimer_start(&g_dcc_timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+ return 0;
+
+err_tty_register_driver_failed:
+ put_tty_driver(g_dcc_tty_driver);
+ g_dcc_tty_driver = NULL;
+err_alloc_tty_driver_failed:
+ return ret;
+}
+
+static void __exit dcc_tty_exit(void)
+{
+ int ret;
+
+ tty_unregister_device(g_dcc_tty_driver, 0);
+ ret = tty_unregister_driver(g_dcc_tty_driver);
+ if (ret < 0) {
+ printk(KERN_ERR "dcc_tty_remove: tty_unregister_driver failed, %d\n", ret);
+ } else {
+ put_tty_driver(g_dcc_tty_driver);
+ }
+ g_dcc_tty_driver = NULL;
+}
+
+module_init(dcc_tty_init);
+module_exit(dcc_tty_exit);
+
+
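
Once loaded, the driver registers a single node, /dev/ttyDCC, which behaves like an ordinary tty from userspace. A minimal sketch writing a line through it (the device path follows from the registration above):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyDCC", O_RDWR | O_NOCTTY);
	const char msg[] = "hello over DCC\n";

	if (fd < 0) {
		perror("open /dev/ttyDCC");
		return 1;
	}
	/* Bytes queue in g_dcc_buffer and drain via dcc_poll_locked(). */
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");
	close(fd);
	return 0;
}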
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 1ccbe9482fa..38d3069b7f0 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -60,6 +60,7 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
}
#endif
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
@@ -85,7 +86,9 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
return 1;
}
#endif
+#endif
+#ifdef CONFIG_DEVMEM
void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}
@@ -212,6 +215,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
*ppos += written;
return written;
}
+#endif /* CONFIG_DEVMEM */
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
int __weak phys_mem_access_prot_allowed(struct file *file,
unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
@@ -333,6 +339,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
}
return 0;
}
+#endif /* CONFIG_DEVMEM */
#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
@@ -727,6 +734,8 @@ static loff_t null_lseek(struct file *file, loff_t offset, int orig)
return file->f_pos = 0;
}
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
+
/*
* The memory devices use the full 32/64 bits of the offset, and so we cannot
* check against negative addresses: they are ok. The return value is weird,
@@ -760,10 +769,14 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
return ret;
}
+#endif
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
static int open_port(struct inode *inode, struct file *filp)
{
return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
+#endif
#define zero_lseek null_lseek
#define full_lseek null_lseek
@@ -774,6 +787,7 @@ static int open_port(struct inode *inode, struct file *filp)
#define open_kmem open_mem
#define open_oldmem open_mem
+#ifdef CONFIG_DEVMEM
static const struct file_operations mem_fops = {
.llseek = memory_lseek,
.read = read_mem,
@@ -782,6 +796,7 @@ static const struct file_operations mem_fops = {
.open = open_mem,
.get_unmapped_area = get_unmapped_area_mem,
};
+#endif
#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
@@ -851,7 +866,9 @@ static const struct memdev {
const struct file_operations *fops;
struct backing_dev_info *dev_info;
} devlist[] = {
+#ifdef CONFIG_DEVMEM
[1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
+#endif
#ifdef CONFIG_DEVKMEM
[2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a9c1324843e..9e1f7d9b52b 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -102,6 +102,16 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
Be aware that not all cpufreq drivers support the conservative
governor. If unsure have a look at the help section of the
driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+ bool "interactive"
+ select CPU_FREQ_GOV_INTERACTIVE
+ help
+ Use the CPUFreq governor 'interactive' as default. This allows
+ you to get a full dynamic cpu frequency capable system by simply
+ loading your cpufreq low-level hardware driver, using the
+ 'interactive' governor for latency-sensitive workloads.
+
endchoice
config CPU_FREQ_GOV_PERFORMANCE
@@ -160,6 +170,24 @@ config CPU_FREQ_GOV_ONDEMAND
If in doubt, say N.
+config CPU_FREQ_GOV_INTERACTIVE
+ tristate "'interactive' cpufreq policy governor"
+ default n
+ help
+ 'interactive' - This driver adds a dynamic cpufreq policy governor
+ designed for latency-sensitive workloads.
+
+ This governor attempts to reduce the latency of clock
+ increases so that the system is more responsive to
+ interactive workloads.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_interactive.
+
+ For details, take a look at linux/Documentation/cpu-freq.
+
+ If in doubt, say N.
+
config CPU_FREQ_GOV_CONSERVATIVE
tristate "'conservative' cpufreq governor"
depends on CPU_FREQ
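
With CPU_FREQ_GOV_INTERACTIVE built in (or the cpufreq_interactive module loaded), the governor is selected per policy through the standard cpufreq sysfs interface. A minimal sketch, assuming the usual sysfs layout and cpu0:

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor";
	FILE *f = fopen(path, "w");	/* needs root */

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fputs("interactive", f) == EOF || fclose(f) == EOF) {
		perror("write");
		return 1;
	}
	return 0;
}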
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 1db9b4929cf..fdda5fdbbc4 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
# CPUfreq cross-arch helpers
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 648554742a9..6b53d05f744 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -17,7 +17,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <asm/cputime.h>
#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
@@ -25,6 +27,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/tick.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
@@ -132,6 +135,51 @@ bool have_governor_per_policy(void)
{
return cpufreq_driver->have_governor_per_policy;
}
+EXPORT_SYMBOL_GPL(have_governor_per_policy);
+
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
+{
+ if (have_governor_per_policy())
+ return &policy->kobj;
+ else
+ return cpufreq_global_kobject;
+}
+EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
+
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+{
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
+
+ cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+
+ idle_time = cur_wall_time - busy_time;
+ if (wall)
+ *wall = cputime_to_usecs(cur_wall_time);
+
+ return cputime_to_usecs(idle_time);
+}
+
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
+{
+ u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
+
+ if (idle_time == -1ULL)
+ return get_cpu_idle_time_jiffy(cpu, wall);
+ else if (!io_busy)
+ idle_time += get_cpu_iowait_time_us(cpu, wall);
+
+ return idle_time;
+}
+EXPORT_SYMBOL_GPL(get_cpu_idle_time);
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
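
get_cpu_idle_time(), moved here and exported above, returns cumulative idle time in microseconds and, through the wall pointer, the matching wall-clock time. Governors sample it twice and turn the deltas into a busy percentage. A standalone sketch of that arithmetic, with made-up sample values:

#include <stdint.h>
#include <stdio.h>

/* Busy percentage between two (idle, wall) samples, both in usecs. */
static unsigned int busy_pct(uint64_t idle0, uint64_t wall0,
			     uint64_t idle1, uint64_t wall1)
{
	uint64_t didle = idle1 - idle0;
	uint64_t dwall = wall1 - wall0;

	if (!dwall || didle >= dwall)
		return 0;
	return (unsigned int)(100 * (dwall - didle) / dwall);
}

int main(void)
{
	/* 20 ms window with 5 ms idle: 75% busy. */
	printf("%u%%\n", busy_pct(100000, 500000, 105000, 520000));
	return 0;
}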
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index a86ff72141f..ace35effc4b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -23,20 +23,11 @@
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/tick.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "cpufreq_governor.h"
-static struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
-{
- if (have_governor_per_policy())
- return &policy->kobj;
- else
- return cpufreq_global_kobject;
-}
-
static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
if (have_governor_per_policy())
@@ -45,41 +36,6 @@ static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
return dbs_data->cdata->attr_group_gov_sys;
}
-static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
-{
- u64 idle_time;
- u64 cur_wall_time;
- u64 busy_time;
-
- cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-
- busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
-
- idle_time = cur_wall_time - busy_time;
- if (wall)
- *wall = cputime_to_usecs(cur_wall_time);
-
- return cputime_to_usecs(idle_time);
-}
-
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
-{
- u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
-
- if (idle_time == -1ULL)
- return get_cpu_idle_time_jiffy(cpu, wall);
- else if (!io_busy)
- idle_time += get_cpu_iowait_time_us(cpu, wall);
-
- return idle_time;
-}
-EXPORT_SYMBOL_GPL(get_cpu_idle_time);
-
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 0d9e6befe1d..c501ca83d75 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -256,7 +256,6 @@ static ssize_t show_sampling_rate_min_gov_pol \
return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
}
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
unsigned int sampling_rate);
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
new file mode 100644
index 00000000000..c08831720fa
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -0,0 +1,1341 @@
+/*
+ * drivers/cpufreq/cpufreq_interactive.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/tick.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include "cpufreq_governor.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_interactive.h>
+
+struct cpufreq_interactive_cpuinfo {
+ struct timer_list cpu_timer;
+ struct timer_list cpu_slack_timer;
+ spinlock_t load_lock; /* protects the next 4 fields */
+ u64 time_in_idle;
+ u64 time_in_idle_timestamp;
+ u64 cputime_speedadj;
+ u64 cputime_speedadj_timestamp;
+ struct cpufreq_policy *policy;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int target_freq;
+ unsigned int floor_freq;
+ u64 floor_validate_time;
+ u64 hispeed_validate_time;
+ struct rw_semaphore enable_sem;
+ int governor_enabled;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
+
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
+
+/* Target load. Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned int default_above_hispeed_delay[] = {
+ DEFAULT_ABOVE_HISPEED_DELAY };
+
+struct cpufreq_interactive_tunables {
+ int usage_count;
+ /* Hi speed to bump to from lo speed when load bursts (default max) */
+ unsigned int hispeed_freq;
+ /* Go to hi speed when CPU load at or above this value. */
+#define DEFAULT_GO_HISPEED_LOAD 99
+ unsigned long go_hispeed_load;
+ /* Target load. Lower values result in higher CPU speeds. */
+ spinlock_t target_loads_lock;
+ unsigned int *target_loads;
+ int ntarget_loads;
+ /*
+ * The minimum amount of time to spend at a frequency before we can ramp
+ * down.
+ */
+#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
+ unsigned long min_sample_time;
+ /*
+ * The sample rate of the timer used to increase frequency
+ */
+ unsigned long timer_rate;
+ /*
+ * Wait this long before raising speed above hispeed, by default a
+ * single timer interval.
+ */
+ spinlock_t above_hispeed_delay_lock;
+ unsigned int *above_hispeed_delay;
+ int nabove_hispeed_delay;
+ /* Non-zero means indefinite speed boost active */
+ int boost_val;
+ /* Duration of a boost pulse in usecs */
+ int boostpulse_duration_val;
+ /* End time of boost pulse in ktime converted to usecs */
+ u64 boostpulse_endtime;
+ /*
+ * Max additional time to wait in idle, beyond timer_rate, at speeds
+ * above minimum before wakeup to reduce speed, or -1 if unnecessary.
+ */
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+ int timer_slack_val;
+ bool io_is_busy;
+};
+
+/* For cases where we have a single governor instance for the whole system */
+struct cpufreq_interactive_tunables *common_tunables;
+
+static struct attribute_group *get_sysfs_attr(void);
+
+static void cpufreq_interactive_timer_resched(
+ struct cpufreq_interactive_cpuinfo *pcpu)
+{
+ struct cpufreq_interactive_tunables *tunables =
+ pcpu->policy->governor_data;
+ unsigned long expires;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcpu->load_lock, flags);
+ pcpu->time_in_idle =
+ get_cpu_idle_time(smp_processor_id(),
+ &pcpu->time_in_idle_timestamp,
+ tunables->io_is_busy);
+ pcpu->cputime_speedadj = 0;
+ pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+ expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
+ mod_timer_pinned(&pcpu->cpu_timer, expires);
+
+ if (tunables->timer_slack_val >= 0 &&
+ pcpu->target_freq > pcpu->policy->min) {
+ expires += usecs_to_jiffies(tunables->timer_slack_val);
+ mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
+ }
+
+ spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+/*
+ * The caller shall take enable_sem write semaphore to avoid any timer race.
+ * The cpu_timer and cpu_slack_timer must be deactivated when calling this
+ * function.
+ */
+static void cpufreq_interactive_timer_start(
+ struct cpufreq_interactive_tunables *tunables, int cpu)
+{
+ struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+ unsigned long expires = jiffies +
+ usecs_to_jiffies(tunables->timer_rate);
+ unsigned long flags;
+
+ pcpu->cpu_timer.expires = expires;
+ add_timer_on(&pcpu->cpu_timer, cpu);
+ if (tunables->timer_slack_val >= 0 &&
+ pcpu->target_freq > pcpu->policy->min) {
+ expires += usecs_to_jiffies(tunables->timer_slack_val);
+ pcpu->cpu_slack_timer.expires = expires;
+ add_timer_on(&pcpu->cpu_slack_timer, cpu);
+ }
+
+ spin_lock_irqsave(&pcpu->load_lock, flags);
+ pcpu->time_in_idle =
+ get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
+ tunables->io_is_busy);
+ pcpu->cputime_speedadj = 0;
+ pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+ spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+static unsigned int freq_to_above_hispeed_delay(
+ struct cpufreq_interactive_tunables *tunables,
+ unsigned int freq)
+{
+ int i;
+ unsigned int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+ for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
+ freq >= tunables->above_hispeed_delay[i+1]; i += 2)
+ ;
+
+ ret = tunables->above_hispeed_delay[i];
+ spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+ return ret;
+}
+
+static unsigned int freq_to_targetload(
+ struct cpufreq_interactive_tunables *tunables, unsigned int freq)
+{
+ int i;
+ unsigned int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tunables->target_loads_lock, flags);
+
+ for (i = 0; i < tunables->ntarget_loads - 1 &&
+ freq >= tunables->target_loads[i+1]; i += 2)
+ ;
+
+ ret = tunables->target_loads[i];
+ spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+ return ret;
+}
+
+/*
+ * If increasing frequencies never map to a lower target load then
+ * choose_freq() will find the minimum frequency that does not exceed its
+ * target load given the current load.
+ */
+static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
+ unsigned int loadadjfreq)
+{
+ unsigned int freq = pcpu->policy->cur;
+ unsigned int prevfreq, freqmin, freqmax;
+ unsigned int tl;
+ int index;
+
+ freqmin = 0;
+ freqmax = UINT_MAX;
+
+ do {
+ prevfreq = freq;
+ tl = freq_to_targetload(pcpu->policy->governor_data, freq);
+
+ /*
+ * Find the lowest frequency where the computed load is less
+ * than or equal to the target load.
+ */
+
+ if (cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
+ CPUFREQ_RELATION_L, &index))
+ break;
+ freq = pcpu->freq_table[index].frequency;
+
+ if (freq > prevfreq) {
+ /* The previous frequency is too low. */
+ freqmin = prevfreq;
+
+ if (freq >= freqmax) {
+ /*
+ * Find the highest frequency that is less
+ * than freqmax.
+ */
+ if (cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ freqmax - 1, CPUFREQ_RELATION_H,
+ &index))
+ break;
+ freq = pcpu->freq_table[index].frequency;
+
+ if (freq == freqmin) {
+ /*
+ * The first frequency below freqmax
+ * has already been found to be too
+ * low. freqmax is the lowest speed
+ * we found that is fast enough.
+ */
+ freq = freqmax;
+ break;
+ }
+ }
+ } else if (freq < prevfreq) {
+ /* The previous frequency is high enough. */
+ freqmax = prevfreq;
+
+ if (freq <= freqmin) {
+ /*
+ * Find the lowest frequency that is higher
+ * than freqmin.
+ */
+ if (cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ freqmin + 1, CPUFREQ_RELATION_L,
+ &index))
+ break;
+ freq = pcpu->freq_table[index].frequency;
+
+ /*
+ * If freqmax is the first frequency above
+ * freqmin then we have already found that
+ * this speed is fast enough.
+ */
+ if (freq == freqmax)
+ break;
+ }
+ }
+
+ /* If same frequency chosen as previous then done. */
+ } while (freq != prevfreq);
+
+ return freq;
+}
+
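/*
 * Editorial illustration, not part of the patch: choose_freq() above
 * converges (by bisecting the frequency table) on the lowest frequency f
 * whose projected load, loadadjfreq / f, stays at or below the target
 * load for f. A linear-scan sketch of the same selection rule, with
 * made-up frequencies and target loads:
 */
#include <stdio.h>

static const unsigned int freqs[] =
	{ 300000, 600000, 1000000, 1500000 };	/* kHz, ascending */

static unsigned int target_load(unsigned int freq)
{
	return freq >= 1000000 ? 95 : 85;	/* higher target at 1 GHz+ */
}

static unsigned int pick(unsigned int loadadjfreq)
{
	unsigned int i;

	for (i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++)
		if (loadadjfreq / freqs[i] <= target_load(freqs[i]))
			return freqs[i];
	return freqs[sizeof(freqs) / sizeof(freqs[0]) - 1];
}

int main(void)
{
	/* 60% load at 1 GHz: loadadjfreq = 60 * 1000000 -> picks 1000000. */
	printf("%u kHz\n", pick(60 * 1000000));
	return 0;
}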
+static u64 update_load(int cpu)
+{
+ struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+ struct cpufreq_interactive_tunables *tunables =
+ pcpu->policy->governor_data;
+ u64 now;
+ u64 now_idle;
+ u64 delta_idle;
+ u64 delta_time;
+ u64 active_time;
+
+ now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
+ delta_idle = (now_idle - pcpu->time_in_idle);
+ delta_time = (now - pcpu->time_in_idle_timestamp);
+
+ if (delta_time <= delta_idle)
+ active_time = 0;
+ else
+ active_time = delta_time - delta_idle;
+
+ pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
+
+ pcpu->time_in_idle = now_idle;
+ pcpu->time_in_idle_timestamp = now;
+ return now;
+}
+
+static void cpufreq_interactive_timer(unsigned long data)
+{
+ u64 now;
+ unsigned int delta_time;
+ u64 cputime_speedadj;
+ int cpu_load;
+ struct cpufreq_interactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, data);
+ struct cpufreq_interactive_tunables *tunables =
+ pcpu->policy->governor_data;
+ unsigned int new_freq;
+ unsigned int loadadjfreq;
+ unsigned int index;
+ unsigned long flags;
+ bool boosted;
+
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return;
+ if (!pcpu->governor_enabled)
+ goto exit;
+
+ spin_lock_irqsave(&pcpu->load_lock, flags);
+ now = update_load(data);
+ delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
+ cputime_speedadj = pcpu->cputime_speedadj;
+ spin_unlock_irqrestore(&pcpu->load_lock, flags);
+
+ if (WARN_ON_ONCE(!delta_time))
+ goto rearm;
+
+ do_div(cputime_speedadj, delta_time);
+ loadadjfreq = (unsigned int)cputime_speedadj * 100;
+ cpu_load = loadadjfreq / pcpu->target_freq;
+ boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
+
+ if (cpu_load >= tunables->go_hispeed_load || boosted) {
+ if (pcpu->target_freq < tunables->hispeed_freq) {
+ new_freq = tunables->hispeed_freq;
+ } else {
+ new_freq = choose_freq(pcpu, loadadjfreq);
+
+ if (new_freq < tunables->hispeed_freq)
+ new_freq = tunables->hispeed_freq;
+ }
+ } else {
+ new_freq = choose_freq(pcpu, loadadjfreq);
+ }
+
+ if (pcpu->target_freq >= tunables->hispeed_freq &&
+ new_freq > pcpu->target_freq &&
+ now - pcpu->hispeed_validate_time <
+ freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
+ trace_cpufreq_interactive_notyet(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
+ goto rearm;
+ }
+
+ pcpu->hispeed_validate_time = now;
+
+ if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
+ new_freq, CPUFREQ_RELATION_L,
+ &index))
+ goto rearm;
+
+ new_freq = pcpu->freq_table[index].frequency;
+
+ /*
+ * Do not scale below floor_freq unless we have been at or above the
+ * floor frequency for the minimum sample time since last validated.
+ */
+ if (new_freq < pcpu->floor_freq) {
+ if (now - pcpu->floor_validate_time <
+ tunables->min_sample_time) {
+ trace_cpufreq_interactive_notyet(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
+ goto rearm;
+ }
+ }
+
+ /*
+ * Update the timestamp for checking whether speed has been held at
+ * or above the selected frequency for a minimum of min_sample_time,
+ * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
+ * allow the speed to drop as soon as the boostpulse duration expires
+ * (or the indefinite boost is turned off).
+ */
+
+ if (!boosted || new_freq > tunables->hispeed_freq) {
+ pcpu->floor_freq = new_freq;
+ pcpu->floor_validate_time = now;
+ }
+
+ if (pcpu->target_freq == new_freq) {
+ trace_cpufreq_interactive_already(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
+ goto rearm_if_notmax;
+ }
+
+ trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
+
+ pcpu->target_freq = new_freq;
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+ cpumask_set_cpu(data, &speedchange_cpumask);
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+ wake_up_process(speedchange_task);
+
+rearm_if_notmax:
+ /*
+ * Already set max speed and don't see a need to change that,
+ * wait until next idle to re-evaluate, don't need timer.
+ */
+ if (pcpu->target_freq == pcpu->policy->max)
+ goto exit;
+
+rearm:
+ if (!timer_pending(&pcpu->cpu_timer))
+ cpufreq_interactive_timer_resched(pcpu);
+
+exit:
+ up_read(&pcpu->enable_sem);
+ return;
+}
+
+static void cpufreq_interactive_idle_start(void)
+{
+ struct cpufreq_interactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, smp_processor_id());
+ int pending;
+
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return;
+ }
+
+ pending = timer_pending(&pcpu->cpu_timer);
+
+ if (pcpu->target_freq != pcpu->policy->min) {
+ /*
+ * Entering idle while not at lowest speed. On some
+ * platforms this can hold the other CPU(s) at that speed
+ * even though the CPU is idle. Set a timer to re-evaluate
+ * speed so this idle CPU doesn't hold the other CPUs above
+ * min indefinitely. This should probably be a quirk of
+ * the CPUFreq driver.
+ */
+ if (!pending)
+ cpufreq_interactive_timer_resched(pcpu);
+ }
+
+ up_read(&pcpu->enable_sem);
+}
+
+static void cpufreq_interactive_idle_end(void)
+{
+ struct cpufreq_interactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, smp_processor_id());
+
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return;
+ }
+
+ /* Arm the timer for 1-2 ticks later if not already. */
+ if (!timer_pending(&pcpu->cpu_timer)) {
+ cpufreq_interactive_timer_resched(pcpu);
+ } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
+ del_timer(&pcpu->cpu_timer);
+ del_timer(&pcpu->cpu_slack_timer);
+ cpufreq_interactive_timer(smp_processor_id());
+ }
+
+ up_read(&pcpu->enable_sem);
+}
+
+static int cpufreq_interactive_speedchange_task(void *data)
+{
+ unsigned int cpu;
+ cpumask_t tmp_mask;
+ unsigned long flags;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+ if (cpumask_empty(&speedchange_cpumask)) {
+ spin_unlock_irqrestore(&speedchange_cpumask_lock,
+ flags);
+ schedule();
+
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+ }
+
+ set_current_state(TASK_RUNNING);
+ tmp_mask = speedchange_cpumask;
+ cpumask_clear(&speedchange_cpumask);
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+ for_each_cpu(cpu, &tmp_mask) {
+ unsigned int j;
+ unsigned int max_freq = 0;
+
+ pcpu = &per_cpu(cpuinfo, cpu);
+ if (!down_read_trylock(&pcpu->enable_sem))
+ continue;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ continue;
+ }
+
+ for_each_cpu(j, pcpu->policy->cpus) {
+ struct cpufreq_interactive_cpuinfo *pjcpu =
+ &per_cpu(cpuinfo, j);
+
+ if (pjcpu->target_freq > max_freq)
+ max_freq = pjcpu->target_freq;
+ }
+
+ if (max_freq != pcpu->policy->cur)
+ __cpufreq_driver_target(pcpu->policy,
+ max_freq,
+ CPUFREQ_RELATION_H);
+ trace_cpufreq_interactive_setspeed(cpu,
+ pcpu->target_freq,
+ pcpu->policy->cur);
+
+ up_read(&pcpu->enable_sem);
+ }
+ }
+
+ return 0;
+}
+
+static void cpufreq_interactive_boost(void)
+{
+ int i;
+ int anyboost = 0;
+ unsigned long flags;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ struct cpufreq_interactive_tunables *tunables;
+
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+ for_each_online_cpu(i) {
+ pcpu = &per_cpu(cpuinfo, i);
+ tunables = pcpu->policy->governor_data;
+
+ if (pcpu->target_freq < tunables->hispeed_freq) {
+ pcpu->target_freq = tunables->hispeed_freq;
+ cpumask_set_cpu(i, &speedchange_cpumask);
+ pcpu->hispeed_validate_time =
+ ktime_to_us(ktime_get());
+ anyboost = 1;
+ }
+
+ /*
+ * Set floor freq and (re)start timer for when last
+ * validated.
+ */
+
+ pcpu->floor_freq = tunables->hispeed_freq;
+ pcpu->floor_validate_time = ktime_to_us(ktime_get());
+ }
+
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+ if (anyboost)
+ wake_up_process(speedchange_task);
+}
+
+static int cpufreq_interactive_notifier(
+ struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ int cpu;
+ unsigned long flags;
+
+ if (val == CPUFREQ_POSTCHANGE) {
+ pcpu = &per_cpu(cpuinfo, freq->cpu);
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return 0;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return 0;
+ }
+
+ for_each_cpu(cpu, pcpu->policy->cpus) {
+ struct cpufreq_interactive_cpuinfo *pjcpu =
+ &per_cpu(cpuinfo, cpu);
+ if (cpu != freq->cpu) {
+ if (!down_read_trylock(&pjcpu->enable_sem))
+ continue;
+ if (!pjcpu->governor_enabled) {
+ up_read(&pjcpu->enable_sem);
+ continue;
+ }
+ }
+ spin_lock_irqsave(&pjcpu->load_lock, flags);
+ update_load(cpu);
+ spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+ if (cpu != freq->cpu)
+ up_read(&pjcpu->enable_sem);
+ }
+
+ up_read(&pcpu->enable_sem);
+ }
+ return 0;
+}
+
+static struct notifier_block cpufreq_notifier_block = {
+ .notifier_call = cpufreq_interactive_notifier,
+};
+
+static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
+{
+ const char *cp;
+ int i;
+ int ntokens = 1;
+ unsigned int *tokenized_data;
+ int err = -EINVAL;
+
+ cp = buf;
+ while ((cp = strpbrk(cp + 1, " :")))
+ ntokens++;
+
+ if (!(ntokens & 0x1))
+ goto err;
+
+ tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
+ if (!tokenized_data) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ cp = buf;
+ i = 0;
+ while (i < ntokens) {
+ if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
+ goto err_kfree;
+
+ cp = strpbrk(cp, " :");
+ if (!cp)
+ break;
+ cp++;
+ }
+
+ if (i != ntokens)
+ goto err_kfree;
+
+ *num_tokens = ntokens;
+ return tokenized_data;
+
+err_kfree:
+ kfree(tokenized_data);
+err:
+ return ERR_PTR(err);
+}
+
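/*
 * Editorial note, not part of the patch: get_tokenized_data() accepts an
 * odd-length list of the form "load freq:load freq:load ...". For
 * target_loads, "85 1500000:90 1700000:97" means target load 85 below
 * 1.5 GHz, 90 from 1.5 GHz, and 97 from 1.7 GHz. A userspace sketch
 * storing such a string (path assumes a single system-wide governor
 * instance; the values are illustrative):
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpufreq/interactive/target_loads";
	FILE *f = fopen(path, "w");	/* needs root */

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fputs("85 1500000:90 1700000:97", f) == EOF || fclose(f) == EOF) {
		perror("write");
		return 1;
	}
	return 0;
}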
+static ssize_t show_target_loads(
+ struct cpufreq_interactive_tunables *tunables,
+ char *buf)
+{
+ int i;
+ ssize_t ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tunables->target_loads_lock, flags);
+
+ for (i = 0; i < tunables->ntarget_loads; i++)
+ ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
+ i & 0x1 ? ":" : " ");
+
+ sprintf(buf + ret - 1, "\n");
+ spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+ return ret;
+}
+
+static ssize_t store_target_loads(
+ struct cpufreq_interactive_tunables *tunables,
+ const char *buf, size_t count)
+{
+ int ntokens;
+ unsigned int *new_target_loads = NULL;
+ unsigned long flags;
+
+ new_target_loads = get_tokenized_data(buf, &ntokens);
+ if (IS_ERR(new_target_loads))
+ return PTR_RET(new_target_loads);
+
+ spin_lock_irqsave(&tunables->target_loads_lock, flags);
+ if (tunables->target_loads != default_target_loads)
+ kfree(tunables->target_loads);
+ tunables->target_loads = new_target_loads;
+ tunables->ntarget_loads = ntokens;
+ spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+ return count;
+}
+
+static ssize_t show_above_hispeed_delay(
+ struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+ int i;
+ ssize_t ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+ for (i = 0; i < tunables->nabove_hispeed_delay; i++)
+ ret += sprintf(buf + ret, "%u%s",
+ tunables->above_hispeed_delay[i],
+ i & 0x1 ? ":" : " ");
+
+ sprintf(buf + ret - 1, "\n");
+ spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+ return ret;
+}
+
+static ssize_t store_above_hispeed_delay(
+ struct cpufreq_interactive_tunables *tunables,
+ const char *buf, size_t count)
+{
+ int ntokens;
+ unsigned int *new_above_hispeed_delay = NULL;
+ unsigned long flags;
+
+ new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
+ if (IS_ERR(new_above_hispeed_delay))
+ return PTR_RET(new_above_hispeed_delay);
+
+ spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+ if (tunables->above_hispeed_delay != default_above_hispeed_delay)
+ kfree(tunables->above_hispeed_delay);
+ tunables->above_hispeed_delay = new_above_hispeed_delay;
+ tunables->nabove_hispeed_delay = ntokens;
+ spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+ return count;
+}
+
+static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", tunables->hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ tunables->hispeed_freq = val;
+ return count;
+}
+
+static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
+ *tunables, char *buf)
+{
+ return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
+}
+
+static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
+ *tunables, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ tunables->go_hispeed_load = val;
+ return count;
+}
+
+static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
+ *tunables, char *buf)
+{
+ return sprintf(buf, "%lu\n", tunables->min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
+ *tunables, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ tunables->min_sample_time = val;
+ return count;
+}
+
+static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", tunables->timer_rate);
+}
+
+static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ tunables->timer_rate = val;
+ return count;
+}
+
+static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", tunables->timer_slack_val);
+}
+
+static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
+ const char *buf, size_t count)
+{
+ int ret;
+ long val;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ tunables->timer_slack_val = val;
+ return count;
+}
+
+static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", tunables->boost_val);
+}
+
+static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ tunables->boost_val = val;
+
+ if (tunables->boost_val) {
+ trace_cpufreq_interactive_boost("on");
+ cpufreq_interactive_boost();
+ } else {
+ trace_cpufreq_interactive_unboost("off");
+ }
+
+ return count;
+}
+
+static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
+ tunables->boostpulse_duration_val;
+ trace_cpufreq_interactive_boost("pulse");
+ cpufreq_interactive_boost();
+ return count;
+}
+
+static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
+ *tunables, char *buf)
+{
+ return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
+}
+
+static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
+ *tunables, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ tunables->boostpulse_duration_val = val;
+ return count;
+}
+
+static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", tunables->io_is_busy);
+}
+
+static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ tunables->io_is_busy = val;
+ return count;
+}
+
+/*
+ * Create show/store routines
+ * - sys: one governor instance for the complete system
+ * - pol: one governor instance per struct cpufreq_policy
+ */
+#define show_gov_pol_sys(file_name) \
+static ssize_t show_##file_name##_gov_sys \
+(struct kobject *kobj, struct attribute *attr, char *buf) \
+{ \
+ return show_##file_name(common_tunables, buf); \
+} \
+ \
+static ssize_t show_##file_name##_gov_pol \
+(struct cpufreq_policy *policy, char *buf) \
+{ \
+ return show_##file_name(policy->governor_data, buf); \
+}
+
+#define store_gov_pol_sys(file_name) \
+static ssize_t store_##file_name##_gov_sys \
+(struct kobject *kobj, struct attribute *attr, const char *buf, \
+ size_t count) \
+{ \
+ return store_##file_name(common_tunables, buf, count); \
+} \
+ \
+static ssize_t store_##file_name##_gov_pol \
+(struct cpufreq_policy *policy, const char *buf, size_t count) \
+{ \
+ return store_##file_name(policy->governor_data, buf, count); \
+}
+
+#define show_store_gov_pol_sys(file_name) \
+show_gov_pol_sys(file_name); \
+store_gov_pol_sys(file_name)
+
+show_store_gov_pol_sys(target_loads);
+show_store_gov_pol_sys(above_hispeed_delay);
+show_store_gov_pol_sys(hispeed_freq);
+show_store_gov_pol_sys(go_hispeed_load);
+show_store_gov_pol_sys(min_sample_time);
+show_store_gov_pol_sys(timer_rate);
+show_store_gov_pol_sys(timer_slack);
+show_store_gov_pol_sys(boost);
+store_gov_pol_sys(boostpulse);
+show_store_gov_pol_sys(boostpulse_duration);
+show_store_gov_pol_sys(io_is_busy);
+
+#define gov_sys_attr_rw(_name) \
+static struct global_attr _name##_gov_sys = \
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_rw(_name) \
+static struct freq_attr _name##_gov_pol = \
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name) \
+ gov_sys_attr_rw(_name); \
+ gov_pol_attr_rw(_name)
+
+gov_sys_pol_attr_rw(target_loads);
+gov_sys_pol_attr_rw(above_hispeed_delay);
+gov_sys_pol_attr_rw(hispeed_freq);
+gov_sys_pol_attr_rw(go_hispeed_load);
+gov_sys_pol_attr_rw(min_sample_time);
+gov_sys_pol_attr_rw(timer_rate);
+gov_sys_pol_attr_rw(timer_slack);
+gov_sys_pol_attr_rw(boost);
+gov_sys_pol_attr_rw(boostpulse_duration);
+gov_sys_pol_attr_rw(io_is_busy);
+
+static struct global_attr boostpulse_gov_sys =
+ __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
+
+static struct freq_attr boostpulse_gov_pol =
+ __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
+
+/* One Governor instance for entire system */
+static struct attribute *interactive_attributes_gov_sys[] = {
+ &target_loads_gov_sys.attr,
+ &above_hispeed_delay_gov_sys.attr,
+ &hispeed_freq_gov_sys.attr,
+ &go_hispeed_load_gov_sys.attr,
+ &min_sample_time_gov_sys.attr,
+ &timer_rate_gov_sys.attr,
+ &timer_slack_gov_sys.attr,
+ &boost_gov_sys.attr,
+ &boostpulse_gov_sys.attr,
+ &boostpulse_duration_gov_sys.attr,
+ &io_is_busy_gov_sys.attr,
+ NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_sys = {
+ .attrs = interactive_attributes_gov_sys,
+ .name = "interactive",
+};
+
+/* Per policy governor instance */
+static struct attribute *interactive_attributes_gov_pol[] = {
+ &target_loads_gov_pol.attr,
+ &above_hispeed_delay_gov_pol.attr,
+ &hispeed_freq_gov_pol.attr,
+ &go_hispeed_load_gov_pol.attr,
+ &min_sample_time_gov_pol.attr,
+ &timer_rate_gov_pol.attr,
+ &timer_slack_gov_pol.attr,
+ &boost_gov_pol.attr,
+ &boostpulse_gov_pol.attr,
+ &boostpulse_duration_gov_pol.attr,
+ &io_is_busy_gov_pol.attr,
+ NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_pol = {
+ .attrs = interactive_attributes_gov_pol,
+ .name = "interactive",
+};
+
+static struct attribute_group *get_sysfs_attr(void)
+{
+ if (have_governor_per_policy())
+ return &interactive_attr_group_gov_pol;
+ else
+ return &interactive_attr_group_gov_sys;
+}
+
+static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
+{
+ switch (val) {
+ case IDLE_START:
+ cpufreq_interactive_idle_start();
+ break;
+ case IDLE_END:
+ cpufreq_interactive_idle_end();
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block cpufreq_interactive_idle_nb = {
+ .notifier_call = cpufreq_interactive_idle_notifier,
+};
+
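+/*
+ * Governor entry point: POLICY_INIT/POLICY_EXIT manage the shared or
+ * per-policy tunables and their sysfs group, START/STOP arm and tear down
+ * the per-CPU sampling timers, and LIMITS clamps the current and target
+ * frequencies into the new [policy->min, policy->max] range.
+ */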
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ int rc;
+ unsigned int j;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ struct cpufreq_frequency_table *freq_table;
+ struct cpufreq_interactive_tunables *tunables;
+
+ if (have_governor_per_policy())
+ tunables = policy->governor_data;
+ else
+ tunables = common_tunables;
+
+ WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));
+
+ switch (event) {
+ case CPUFREQ_GOV_POLICY_INIT:
+ if (have_governor_per_policy()) {
+ WARN_ON(tunables);
+ } else if (tunables) {
+ tunables->usage_count++;
+ policy->governor_data = tunables;
+ return 0;
+ }
+
+ tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+ if (!tunables) {
+ pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ rc = sysfs_create_group(get_governor_parent_kobj(policy),
+ get_sysfs_attr());
+ if (rc) {
+ kfree(tunables);
+ return rc;
+ }
+
+ tunables->usage_count = 1;
+ tunables->above_hispeed_delay = default_above_hispeed_delay;
+ tunables->nabove_hispeed_delay =
+ ARRAY_SIZE(default_above_hispeed_delay);
+ tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+ tunables->target_loads = default_target_loads;
+ tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
+ tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+ tunables->timer_rate = DEFAULT_TIMER_RATE;
+ tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+ tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
+
+ spin_lock_init(&tunables->target_loads_lock);
+ spin_lock_init(&tunables->above_hispeed_delay_lock);
+
+ if (!policy->governor->initialized) {
+ idle_notifier_register(&cpufreq_interactive_idle_nb);
+ cpufreq_register_notifier(&cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+
+ policy->governor_data = tunables;
+ if (!have_governor_per_policy())
+ common_tunables = tunables;
+
+ break;
+
+ case CPUFREQ_GOV_POLICY_EXIT:
+ if (!--tunables->usage_count) {
+ if (policy->governor->initialized == 1) {
+ cpufreq_unregister_notifier(&cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ idle_notifier_unregister(&cpufreq_interactive_idle_nb);
+ }
+
+ sysfs_remove_group(get_governor_parent_kobj(policy),
+ get_sysfs_attr());
+ kfree(tunables);
+ common_tunables = NULL;
+ }
+
+ policy->governor_data = NULL;
+ break;
+
+ case CPUFREQ_GOV_START:
+ mutex_lock(&gov_lock);
+
+ freq_table = cpufreq_frequency_get_table(policy->cpu);
+ if (!tunables->hispeed_freq)
+ tunables->hispeed_freq = policy->max;
+
+ for_each_cpu(j, policy->cpus) {
+ pcpu = &per_cpu(cpuinfo, j);
+ pcpu->policy = policy;
+ pcpu->target_freq = policy->cur;
+ pcpu->freq_table = freq_table;
+ pcpu->floor_freq = pcpu->target_freq;
+ pcpu->floor_validate_time =
+ ktime_to_us(ktime_get());
+ pcpu->hispeed_validate_time =
+ pcpu->floor_validate_time;
+ down_write(&pcpu->enable_sem);
+ del_timer_sync(&pcpu->cpu_timer);
+ del_timer_sync(&pcpu->cpu_slack_timer);
+ cpufreq_interactive_timer_start(tunables, j);
+ pcpu->governor_enabled = 1;
+ up_write(&pcpu->enable_sem);
+ }
+
+ mutex_unlock(&gov_lock);
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ mutex_lock(&gov_lock);
+ for_each_cpu(j, policy->cpus) {
+ pcpu = &per_cpu(cpuinfo, j);
+ down_write(&pcpu->enable_sem);
+ pcpu->governor_enabled = 0;
+ del_timer_sync(&pcpu->cpu_timer);
+ del_timer_sync(&pcpu->cpu_slack_timer);
+ up_write(&pcpu->enable_sem);
+ }
+
+ mutex_unlock(&gov_lock);
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ if (policy->max < policy->cur)
+ __cpufreq_driver_target(policy,
+ policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > policy->cur)
+ __cpufreq_driver_target(policy,
+ policy->min, CPUFREQ_RELATION_L);
+ for_each_cpu(j, policy->cpus) {
+ pcpu = &per_cpu(cpuinfo, j);
+
+ /* hold write semaphore to avoid race */
+ down_write(&pcpu->enable_sem);
+ if (pcpu->governor_enabled == 0) {
+ up_write(&pcpu->enable_sem);
+ continue;
+ }
+
+ /* update target_freq first */
+ if (policy->max < pcpu->target_freq)
+ pcpu->target_freq = policy->max;
+ else if (policy->min > pcpu->target_freq)
+ pcpu->target_freq = policy->min;
+
+ /*
+ * Reschedule the timers.
+ * Delete them first; otherwise the timer callback may return
+ * without re-arming the timer if it fails to acquire the
+ * semaphore, and that race can leave the timer stopped
+ * unexpectedly.
+ */
+ del_timer_sync(&pcpu->cpu_timer);
+ del_timer_sync(&pcpu->cpu_slack_timer);
+ cpufreq_interactive_timer_start(tunables, j);
+ up_write(&pcpu->enable_sem);
+ }
+ break;
+ }
+ return 0;
+}
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_interactive = {
+ .name = "interactive",
+ .governor = cpufreq_governor_interactive,
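+ /* 10 ms, in nanoseconds */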
+ .max_transition_latency = 10000000,
+ .owner = THIS_MODULE,
+};
+
+static void cpufreq_interactive_nop_timer(unsigned long data)
+{
+}
+
+static int __init cpufreq_interactive_init(void)
+{
+ unsigned int i;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+ /* Initialize per-CPU timers */
+ for_each_possible_cpu(i) {
+ pcpu = &per_cpu(cpuinfo, i);
+ init_timer_deferrable(&pcpu->cpu_timer);
+ pcpu->cpu_timer.function = cpufreq_interactive_timer;
+ pcpu->cpu_timer.data = i;
+ init_timer(&pcpu->cpu_slack_timer);
+ pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
+ spin_lock_init(&pcpu->load_lock);
+ init_rwsem(&pcpu->enable_sem);
+ }
+
+ spin_lock_init(&speedchange_cpumask_lock);
+ mutex_init(&gov_lock);
+ speedchange_task =
+ kthread_create(cpufreq_interactive_speedchange_task, NULL,
+ "cfinteractive");
+ if (IS_ERR(speedchange_task))
+ return PTR_ERR(speedchange_task);
+
+ sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
+ get_task_struct(speedchange_task);
+
+ /* NB: wake up so the thread does not look hung to the freezer */
+ wake_up_process(speedchange_task);
+
+ return cpufreq_register_governor(&cpufreq_gov_interactive);
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+fs_initcall(cpufreq_interactive_init);
+#else
+module_init(cpufreq_interactive_init);
+#endif
+
+static void __exit cpufreq_interactive_exit(void)
+{
+ cpufreq_unregister_governor(&cpufreq_gov_interactive);
+ kthread_stop(speedchange_task);
+ put_task_struct(speedchange_task);
+}
+
+module_exit(cpufreq_interactive_exit);
+
+MODULE_AUTHOR("Mike Chan <mike@android.com>");
+MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
+ "latency-sensitive workloads");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 66733f1d55d..038c7cca0cf 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -344,6 +344,27 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
return 0;
}
+static int cpufreq_stats_create_table_cpu(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ struct cpufreq_frequency_table *table;
+ int ret = -ENODEV;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return -ENODEV;
+
+ table = cpufreq_frequency_get_table(cpu);
+ if (!table)
+ goto out;
+
+ ret = cpufreq_stats_create_table(policy, table);
+
+out:
+ cpufreq_cpu_put(policy);
+ return ret;
+}
+
static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
@@ -363,6 +384,10 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
case CPU_DEAD_FROZEN:
cpufreq_stats_free_table(cpu);
break;
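+ /* a failed CPU hot-remove may have torn down the stats table; recreate it */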
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ cpufreq_stats_create_table_cpu(cpu);
+ break;
}
return NOTIFY_OK;
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index bc580b67a65..33305fb3d5f 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -173,7 +173,12 @@ static inline int performance_multiplier(void)
/* for higher loadavg, we are more reluctant */
- mult += 2 * get_loadavg();
+ /*
+ * this doesn't work as intended - it is almost always 0, but can
+ * sometimes, depending on workload, spike very high into the hundreds
+ * even when the average cpu load is under 10%.
+ */
+ /* mult += 2 * get_loadavg(); */
/* for IO wait tasks (per cpu!) we add 5x each */
mult += 10 * nr_iowait_cpu(smp_processor_id());
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index c2534d62911..a35c5b932eb 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -136,7 +136,7 @@ static struct gpio_desc *gpio_to_desc(unsigned gpio)
*/
static int desc_to_gpio(const struct gpio_desc *desc)
{
- return desc->chip->base + gpio_chip_hwgpio(desc);
+ return desc - &gpio_desc[0];
}
@@ -1214,15 +1214,14 @@ int gpiochip_add(struct gpio_chip *chip)
}
}
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&chip->pin_ranges);
#endif
of_gpiochip_add(chip);
-unlock:
- spin_unlock_irqrestore(&gpio_lock, flags);
-
if (status)
goto fail;
@@ -1235,6 +1234,9 @@ unlock:
chip->label ? : "generic");
return 0;
+
+unlock:
+ spin_unlock_irqrestore(&gpio_lock, flags);
fail:
/* failures here can mean systems won't boot... */
pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 012880a2228..90c718f6ede 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1338,8 +1338,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
* UGCI) cram a lot of unrelated inputs into the
* same interface. */
hidinput->report = report;
- if (drv->input_configured)
- drv->input_configured(hid, hidinput);
+ if (drv->input_configured &&
+ drv->input_configured(hid, hidinput))
+ goto out_cleanup;
if (input_register_device(hidinput->input))
goto out_cleanup;
hidinput = NULL;
@@ -1360,8 +1361,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
}
if (hidinput) {
- if (drv->input_configured)
- drv->input_configured(hid, hidinput);
+ if (drv->input_configured &&
+ drv->input_configured(hid, hidinput))
+ goto out_cleanup;
if (input_register_device(hidinput->input))
goto out_cleanup;
}
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 3d8e58ac749..fb9ac126671 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -443,6 +443,16 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
(usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON)
td->mt_flags |= INPUT_MT_POINTER;
+ /* Only map fields from TouchScreen or TouchPad collections.
+ * We need to ignore fields that belong to other collections
+ * such as Mouse that might have the same GenericDesktop usages. */
+ if (field->application == HID_DG_TOUCHSCREEN)
+ set_bit(INPUT_PROP_DIRECT, hi->input->propbit);
+ else if (field->application == HID_DG_TOUCHPAD)
+ set_bit(INPUT_PROP_POINTER, hi->input->propbit);
+ else
+ return 0;
+
if (usage->usage_index)
prev_usage = &field->usage[usage->usage_index - 1];
@@ -772,12 +782,13 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
mt_sync_frame(td, report->field[0]->hidinput->input);
}
-static void mt_touch_input_configured(struct hid_device *hdev,
+static int mt_touch_input_configured(struct hid_device *hdev,
struct hid_input *hi)
{
struct mt_device *td = hid_get_drvdata(hdev);
struct mt_class *cls = &td->mtclass;
struct input_dev *input = hi->input;
+ int ret;
if (!td->maxcontacts)
td->maxcontacts = MT_DEFAULT_MAXCONTACT;
@@ -792,9 +803,12 @@ static void mt_touch_input_configured(struct hid_device *hdev,
if (cls->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
td->mt_flags |= INPUT_MT_DROP_UNUSED;
- input_mt_init_slots(input, td->maxcontacts, td->mt_flags);
+ ret = input_mt_init_slots(input, td->maxcontacts, td->mt_flags);
+ if (ret)
+ return ret;
td->mt_flags = 0;
+ return 0;
}
static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -927,19 +941,21 @@ static void mt_post_parse(struct mt_device *td)
cls->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
}
-static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
+static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
struct mt_device *td = hid_get_drvdata(hdev);
char *name = kstrdup(hdev->name, GFP_KERNEL);
+ int ret = 0;
if (name)
hi->input->name = name;
if (hi->report->id == td->mt_report_id)
- mt_touch_input_configured(hdev, hi);
+ ret = mt_touch_input_configured(hdev, hi);
if (hi->report->id == td->pen_report_id)
mt_pen_input_configured(hdev, hi);
+ return ret;
}
static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 10aa9ef86ce..145c9861793 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -35,6 +35,7 @@
*/
struct iio_event_interface {
wait_queue_head_t wait;
+ struct mutex read_lock;
DECLARE_KFIFO(det_events, struct iio_event_data, 16);
struct list_head dev_attr_list;
@@ -97,14 +98,16 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
if (count < sizeof(struct iio_event_data))
return -EINVAL;
- spin_lock_irq(&ev_int->wait.lock);
+ if (mutex_lock_interruptible(&ev_int->read_lock))
+ return -ERESTARTSYS;
+
if (kfifo_is_empty(&ev_int->det_events)) {
if (filep->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
goto error_unlock;
}
/* Blocking on device; waiting for something to be there */
- ret = wait_event_interruptible_locked_irq(ev_int->wait,
+ ret = wait_event_interruptible(ev_int->wait,
!kfifo_is_empty(&ev_int->det_events));
if (ret)
goto error_unlock;
@@ -114,7 +117,7 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
error_unlock:
- spin_unlock_irq(&ev_int->wait.lock);
+ mutex_unlock(&ev_int->read_lock);
return ret ? ret : copied;
}
@@ -371,6 +374,7 @@ static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
INIT_KFIFO(ev_int->det_events);
init_waitqueue_head(&ev_int->wait);
+ mutex_init(&ev_int->read_lock);
}
static const char *iio_event_group_name = "events";
@@ -434,6 +438,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
error_free_setup_event_lines:
__iio_remove_event_config_attrs(indio_dev);
+ mutex_destroy(&indio_dev->event_interface->read_lock);
kfree(indio_dev->event_interface);
error_ret:
@@ -446,5 +451,6 @@ void iio_device_unregister_eventset(struct iio_dev *indio_dev)
return;
__iio_remove_event_config_attrs(indio_dev);
kfree(indio_dev->event_interface->group.attrs);
+ mutex_destroy(&indio_dev->event_interface->read_lock);
kfree(indio_dev->event_interface);
}
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index a11ff74a512..9c13b63ef91 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -174,6 +174,15 @@ config INPUT_APMPOWER
To compile this driver as a module, choose M here: the
module will be called apm-power.
+config INPUT_KEYRESET
+ tristate "Reset key"
+ depends on INPUT
+ ---help---
+ Say Y here if you want to reboot when certain keys are pressed.
+
+ To compile this driver as a module, choose M here: the
+ module will be called keyreset.
+
comment "Input Device Drivers"
source "drivers/input/keyboard/Kconfig"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 5ca3f631497..191ea43d080 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
obj-$(CONFIG_INPUT_MISC) += misc/
obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o
+obj-$(CONFIG_INPUT_KEYRESET) += keyreset.o
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index c122dd2adc2..f4897c8c150 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -26,6 +26,7 @@
#include <linux/major.h>
#include <linux/device.h>
#include <linux/cdev.h>
+#include <linux/wakelock.h>
#include "input-compat.h"
struct evdev {
@@ -46,6 +47,9 @@ struct evdev_client {
unsigned int tail;
unsigned int packet_head; /* [future] position of the first element of next packet */
spinlock_t buffer_lock; /* protects access to buffer, head and tail */
+ struct wake_lock wake_lock;
+ bool use_wake_lock;
+ char name[28];
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
@@ -73,10 +77,14 @@ static void __pass_event(struct evdev_client *client,
client->buffer[client->tail].value = 0;
client->packet_head = client->tail;
+ if (client->use_wake_lock)
+ wake_unlock(&client->wake_lock);
}
if (event->type == EV_SYN && event->code == SYN_REPORT) {
client->packet_head = client->head;
+ if (client->use_wake_lock)
+ wake_lock(&client->wake_lock);
kill_fasync(&client->fasync, SIGIO, POLL_IN);
}
}
@@ -291,6 +299,8 @@ static int evdev_release(struct inode *inode, struct file *file)
mutex_unlock(&evdev->mutex);
evdev_detach_client(evdev, client);
+ if (client->use_wake_lock)
+ wake_lock_destroy(&client->wake_lock);
if (is_vmalloc_addr(client))
vfree(client);
@@ -328,6 +338,8 @@ static int evdev_open(struct inode *inode, struct file *file)
client->bufsize = bufsize;
spin_lock_init(&client->buffer_lock);
+ snprintf(client->name, sizeof(client->name), "%s-%d",
+ dev_name(&evdev->dev), task_tgid_vnr(current));
client->evdev = evdev;
evdev_attach_client(evdev, client);
@@ -394,6 +406,9 @@ static int evdev_fetch_next_event(struct evdev_client *client,
if (have_event) {
*event = client->buffer[client->tail++];
client->tail &= client->bufsize - 1;
+ if (client->use_wake_lock &&
+ client->packet_head == client->tail)
+ wake_unlock(&client->wake_lock);
}
spin_unlock_irq(&client->buffer_lock);
@@ -682,6 +697,35 @@ static int evdev_handle_mt_request(struct input_dev *dev,
return 0;
}
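+
+/*
+ * Opt-in suspend blocking: once a client enables it with
+ * EVIOCSSUSPENDBLOCK, a wake lock is held whenever a complete event
+ * packet sits unread in that client's buffer, so the system cannot
+ * suspend until the reader drains it.
+ */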
+static int evdev_enable_suspend_block(struct evdev *evdev,
+ struct evdev_client *client)
+{
+ if (client->use_wake_lock)
+ return 0;
+
+ spin_lock_irq(&client->buffer_lock);
+ wake_lock_init(&client->wake_lock, WAKE_LOCK_SUSPEND, client->name);
+ client->use_wake_lock = true;
+ if (client->packet_head != client->tail)
+ wake_lock(&client->wake_lock);
+ spin_unlock_irq(&client->buffer_lock);
+ return 0;
+}
+
+static int evdev_disable_suspend_block(struct evdev *evdev,
+ struct evdev_client *client)
+{
+ if (!client->use_wake_lock)
+ return 0;
+
+ spin_lock_irq(&client->buffer_lock);
+ client->use_wake_lock = false;
+ wake_lock_destroy(&client->wake_lock);
+ spin_unlock_irq(&client->buffer_lock);
+
+ return 0;
+}
+
static long evdev_do_ioctl(struct file *file, unsigned int cmd,
void __user *p, int compat_mode)
{
@@ -763,6 +807,15 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
case EVIOCSKEYCODE_V2:
return evdev_handle_set_keycode_v2(dev, p);
+
+ case EVIOCGSUSPENDBLOCK:
+ return put_user(client->use_wake_lock, ip);
+
+ case EVIOCSSUSPENDBLOCK:
+ if (p)
+ return evdev_enable_suspend_block(evdev, client);
+ else
+ return evdev_disable_suspend_block(evdev, client);
}
size = _IOC_SIZE(cmd);
diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c
new file mode 100644
index 00000000000..36208fe0baa
--- /dev/null
+++ b/drivers/input/keyreset.c
@@ -0,0 +1,239 @@
+/* drivers/input/keyreset.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keyreset.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+
+
+struct keyreset_state {
+ struct input_handler input_handler;
+ unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+ unsigned long upbit[BITS_TO_LONGS(KEY_CNT)];
+ unsigned long key[BITS_TO_LONGS(KEY_CNT)];
+ spinlock_t lock;
+ int key_down_target;
+ int key_down;
+ int key_up;
+ int restart_disabled;
+ int (*reset_fn)(void);
+};
+
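+/* restart progress: 1 = requested, 2 = syncing filesystems, 3 = restarting */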
+static int restart_requested;
+static void deferred_restart(struct work_struct *dummy)
+{
+ restart_requested = 2;
+ sys_sync();
+ restart_requested = 3;
+ kernel_restart(NULL);
+}
+static DECLARE_WORK(restart_work, deferred_restart);
+
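+/*
+ * Count chord membership: key_down tracks how many configured keys_down
+ * are held, key_up tracks held keys_up. Pressing any keys_up key disables
+ * the restart until every tracked key has been released; the restart fires
+ * when all keys_down are held at once.
+ */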
+static void keyreset_event(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ unsigned long flags;
+ struct keyreset_state *state = handle->private;
+
+ if (type != EV_KEY)
+ return;
+
+ if (code >= KEY_MAX)
+ return;
+
+ if (!test_bit(code, state->keybit))
+ return;
+
+ spin_lock_irqsave(&state->lock, flags);
+ if (!test_bit(code, state->key) == !value)
+ goto done;
+ __change_bit(code, state->key);
+ if (test_bit(code, state->upbit)) {
+ if (value) {
+ state->restart_disabled = 1;
+ state->key_up++;
+ } else {
+ state->key_up--;
+ }
+ } else {
+ if (value)
+ state->key_down++;
+ else
+ state->key_down--;
+ }
+ if (state->key_down == 0 && state->key_up == 0)
+ state->restart_disabled = 0;
+
+ pr_debug("reset key changed %d %d new state %d-%d-%d\n", code, value,
+ state->key_down, state->key_up, state->restart_disabled);
+
+ if (value && !state->restart_disabled &&
+ state->key_down == state->key_down_target) {
+ state->restart_disabled = 1;
+ if (restart_requested)
+ panic("keyboard reset failed, %d", restart_requested);
+ if (state->reset_fn) {
+ restart_requested = state->reset_fn();
+ } else {
+ pr_info("keyboard reset\n");
+ schedule_work(&restart_work);
+ restart_requested = 1;
+ }
+ }
+done:
+ spin_unlock_irqrestore(&state->lock, flags);
+}
+
+static int keyreset_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ int i;
+ int ret;
+ struct input_handle *handle;
+ struct keyreset_state *state =
+ container_of(handler, struct keyreset_state, input_handler);
+
+ for (i = 0; i < KEY_MAX; i++) {
+ if (test_bit(i, state->keybit) && test_bit(i, dev->keybit))
+ break;
+ }
+ if (i == KEY_MAX)
+ return -ENODEV;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = "keyreset";
+ handle->private = state;
+
+ ret = input_register_handle(handle);
+ if (ret)
+ goto err_input_register_handle;
+
+ ret = input_open_device(handle);
+ if (ret)
+ goto err_input_open_device;
+
+ pr_info("using input dev %s for key reset\n", dev->name);
+
+ return 0;
+
+err_input_open_device:
+ input_unregister_handle(handle);
+err_input_register_handle:
+ kfree(handle);
+ return ret;
+}
+
+static void keyreset_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+static const struct input_device_id keyreset_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(input, keyreset_ids);
+
+static int keyreset_probe(struct platform_device *pdev)
+{
+ int ret;
+ int key, *keyp;
+ struct keyreset_state *state;
+ struct keyreset_platform_data *pdata = pdev->dev.platform_data;
+
+ if (!pdata)
+ return -EINVAL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ spin_lock_init(&state->lock);
+ keyp = pdata->keys_down;
+ while ((key = *keyp++)) {
+ if (key >= KEY_MAX)
+ continue;
+ state->key_down_target++;
+ __set_bit(key, state->keybit);
+ }
+ if (pdata->keys_up) {
+ keyp = pdata->keys_up;
+ while ((key = *keyp++)) {
+ if (key >= KEY_MAX)
+ continue;
+ __set_bit(key, state->keybit);
+ __set_bit(key, state->upbit);
+ }
+ }
+
+ if (pdata->reset_fn)
+ state->reset_fn = pdata->reset_fn;
+
+ state->input_handler.event = keyreset_event;
+ state->input_handler.connect = keyreset_connect;
+ state->input_handler.disconnect = keyreset_disconnect;
+ state->input_handler.name = KEYRESET_NAME;
+ state->input_handler.id_table = keyreset_ids;
+ ret = input_register_handler(&state->input_handler);
+ if (ret) {
+ kfree(state);
+ return ret;
+ }
+ platform_set_drvdata(pdev, state);
+ return 0;
+}
+
+static int keyreset_remove(struct platform_device *pdev)
+{
+ struct keyreset_state *state = platform_get_drvdata(pdev);
+ input_unregister_handler(&state->input_handler);
+ kfree(state);
+ return 0;
+}
+
+static struct platform_driver keyreset_driver = {
+ .driver.name = KEYRESET_NAME,
+ .probe = keyreset_probe,
+ .remove = keyreset_remove,
+};
+
+static int __init keyreset_init(void)
+{
+ return platform_driver_register(&keyreset_driver);
+}
+
+static void __exit keyreset_exit(void)
+{
+ platform_driver_unregister(&keyreset_driver);
+}
+
+module_init(keyreset_init);
+module_exit(keyreset_exit);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index bb698e1f9e4..4abf046e30b 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -299,6 +299,17 @@ config INPUT_ATI_REMOTE2
To compile this driver as a module, choose M here: the module will be
called ati_remote2.
+config INPUT_KEYCHORD
+ tristate "Key chord input driver support"
+ help
+ Say Y here if you want to enable the key chord driver
+ accessible at /dev/keychord. This driver can be used
+ for receiving notifications when client-specified key
+ combinations are pressed.
+
+ To compile this driver as a module, choose M here: the
+ module will be called keychord.
+
config INPUT_KEYSPAN_REMOTE
tristate "Keyspan DMR USB remote control"
depends on USB_ARCH_HAS_HCD
@@ -434,6 +445,11 @@ config INPUT_SGI_BTNS
To compile this driver as a module, choose M here: the
module will be called sgi_btns.
+config INPUT_GPIO
+ tristate "GPIO driver support"
+ help
+ Say Y here if you want to support GPIO-based keys, wheels, etc.
+
config HP_SDC_RTC
tristate "HP SDC Real Time Clock"
depends on (GSC || HP300) && SERIO
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index d7fc17f11d7..6b0e8a67772 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -28,9 +28,11 @@ obj-$(CONFIG_INPUT_DA9055_ONKEY) += da9055_onkey.o
obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o
+obj-$(CONFIG_INPUT_GPIO) += gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
+obj-$(CONFIG_INPUT_KEYCHORD) += keychord.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
diff --git a/drivers/input/misc/gpio_axis.c b/drivers/input/misc/gpio_axis.c
new file mode 100644
index 00000000000..0acf4a576f5
--- /dev/null
+++ b/drivers/input/misc/gpio_axis.c
@@ -0,0 +1,192 @@
+/* drivers/input/misc/gpio_axis.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+struct gpio_axis_state {
+ struct gpio_event_input_devs *input_devs;
+ struct gpio_event_axis_info *info;
+ uint32_t pos;
+};
+
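+/*
+ * Map the raw 4-bit Gray-coded input state to a linear position. Adjacent
+ * positions differ in exactly one bit, so a single bouncing line can only
+ * move the decoded position by one step.
+ */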
+uint16_t gpio_axis_4bit_gray_map_table[] = {
+ [0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */
+ [0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */
+ [0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */
+ [0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */
+ [0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */
+ [0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */
+ [0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */
+ [0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */
+};
+uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in)
+{
+ return gpio_axis_4bit_gray_map_table[in];
+}
+
+uint16_t gpio_axis_5bit_singletrack_map_table[] = {
+ [0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /* 10000 10100 11100 */
+ [0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /* 11110 11010 11000 */
+ [0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /* 01000 01010 01110 */
+ [0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /* 01111 01101 01100 */
+ [0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /* 00100 00101 00111 */
+ [0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /* 10111 10110 00110 */
+ [0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /* 00010 10010 10011 */
+ [0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /* 11011 01011 00011 */
+ [0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001 */
+ [0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001 */
+};
+uint16_t gpio_axis_5bit_singletrack_map(
+ struct gpio_event_axis_info *info, uint16_t in)
+{
+ return gpio_axis_5bit_singletrack_map_table[in];
+}
+
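+/*
+ * For EV_REL axes the delta is folded into the shortest signed step, e.g.
+ * with decoded_size = 16 a move from position 15 to 0 gives
+ * (16 + 0 - 15) % 16 = 1 (one step forward) instead of -15; an exact
+ * half-cycle jump is ambiguous and is dropped.
+ */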
+static void gpio_event_update_axis(struct gpio_axis_state *as, int report)
+{
+ struct gpio_event_axis_info *ai = as->info;
+ int i;
+ int change;
+ uint16_t state = 0;
+ uint16_t pos;
+ uint16_t old_pos = as->pos;
+ for (i = ai->count - 1; i >= 0; i--)
+ state = (state << 1) | gpio_get_value(ai->gpio[i]);
+ pos = ai->map(ai, state);
+ if (ai->flags & GPIOEAF_PRINT_RAW)
+ pr_info("axis %d-%d raw %x, pos %d -> %d\n",
+ ai->type, ai->code, state, old_pos, pos);
+ if (report && pos != old_pos) {
+ if (ai->type == EV_REL) {
+ change = (ai->decoded_size + pos - old_pos) %
+ ai->decoded_size;
+ if (change > ai->decoded_size / 2)
+ change -= ai->decoded_size;
+ if (change == ai->decoded_size / 2) {
+ if (ai->flags & GPIOEAF_PRINT_EVENT)
+ pr_info("axis %d-%d unknown direction, "
+ "pos %d -> %d\n", ai->type,
+ ai->code, old_pos, pos);
+ change = 0; /* no closest direction */
+ }
+ if (ai->flags & GPIOEAF_PRINT_EVENT)
+ pr_info("axis %d-%d change %d\n",
+ ai->type, ai->code, change);
+ input_report_rel(as->input_devs->dev[ai->dev],
+ ai->code, change);
+ } else {
+ if (ai->flags & GPIOEAF_PRINT_EVENT)
+ pr_info("axis %d-%d now %d\n",
+ ai->type, ai->code, pos);
+ input_event(as->input_devs->dev[ai->dev],
+ ai->type, ai->code, pos);
+ }
+ input_sync(as->input_devs->dev[ai->dev]);
+ }
+ as->pos = pos;
+}
+
+static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_axis_state *as = dev_id;
+ gpio_event_update_axis(as, 1);
+ return IRQ_HANDLED;
+}
+
+int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func)
+{
+ int ret;
+ int i;
+ int irq;
+ struct gpio_event_axis_info *ai;
+ struct gpio_axis_state *as;
+
+ ai = container_of(info, struct gpio_event_axis_info, info);
+ if (func == GPIO_EVENT_FUNC_SUSPEND) {
+ for (i = 0; i < ai->count; i++)
+ disable_irq(gpio_to_irq(ai->gpio[i]));
+ return 0;
+ }
+ if (func == GPIO_EVENT_FUNC_RESUME) {
+ for (i = 0; i < ai->count; i++)
+ enable_irq(gpio_to_irq(ai->gpio[i]));
+ return 0;
+ }
+
+ if (func == GPIO_EVENT_FUNC_INIT) {
+ *data = as = kmalloc(sizeof(*as), GFP_KERNEL);
+ if (as == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_axis_state_failed;
+ }
+ as->input_devs = input_devs;
+ as->info = ai;
+ if (ai->dev >= input_devs->count) {
+ pr_err("gpio_event_axis: bad device index %d >= %d "
+ "for %d:%d\n", ai->dev, input_devs->count,
+ ai->type, ai->code);
+ ret = -EINVAL;
+ goto err_bad_device_index;
+ }
+
+ input_set_capability(input_devs->dev[ai->dev],
+ ai->type, ai->code);
+ if (ai->type == EV_ABS) {
+ input_set_abs_params(input_devs->dev[ai->dev], ai->code,
+ 0, ai->decoded_size - 1, 0, 0);
+ }
+ for (i = 0; i < ai->count; i++) {
+ ret = gpio_request(ai->gpio[i], "gpio_event_axis");
+ if (ret < 0)
+ goto err_request_gpio_failed;
+ ret = gpio_direction_input(ai->gpio[i]);
+ if (ret < 0)
+ goto err_gpio_direction_input_failed;
+ ret = irq = gpio_to_irq(ai->gpio[i]);
+ if (ret < 0)
+ goto err_get_irq_num_failed;
+ ret = request_irq(irq, gpio_axis_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "gpio_event_axis", as);
+ if (ret < 0)
+ goto err_request_irq_failed;
+ }
+ gpio_event_update_axis(as, 0);
+ return 0;
+ }
+
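+ /*
+ * GPIO_EVENT_FUNC_UNINIT: free irqs and gpios in reverse order; the
+ * init error paths above unwind through the labels in this loop.
+ */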
+ ret = 0;
+ as = *data;
+ for (i = ai->count - 1; i >= 0; i--) {
+ free_irq(gpio_to_irq(ai->gpio[i]), as);
+err_request_irq_failed:
+err_get_irq_num_failed:
+err_gpio_direction_input_failed:
+ gpio_free(ai->gpio[i]);
+err_request_gpio_failed:
+ ;
+ }
+err_bad_device_index:
+ kfree(as);
+ *data = NULL;
+err_alloc_axis_state_failed:
+ return ret;
+}
diff --git a/drivers/input/misc/gpio_event.c b/drivers/input/misc/gpio_event.c
new file mode 100644
index 00000000000..90f07eba3ce
--- /dev/null
+++ b/drivers/input/misc/gpio_event.c
@@ -0,0 +1,228 @@
+/* drivers/input/misc/gpio_event.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct gpio_event {
+ struct gpio_event_input_devs *input_devs;
+ const struct gpio_event_platform_data *info;
+ void *state[0];
+};
+
+static int gpio_input_event(
+ struct input_dev *dev, unsigned int type, unsigned int code, int value)
+{
+ int i;
+ int devnr;
+ int ret = 0;
+ int tmp_ret;
+ struct gpio_event_info **ii;
+ struct gpio_event *ip = input_get_drvdata(dev);
+
+ for (devnr = 0; devnr < ip->input_devs->count; devnr++)
+ if (ip->input_devs->dev[devnr] == dev)
+ break;
+ if (devnr == ip->input_devs->count) {
+ pr_err("gpio_input_event: unknown device %p\n", dev);
+ return -EIO;
+ }
+
+ for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) {
+ if ((*ii)->event) {
+ tmp_ret = (*ii)->event(ip->input_devs, *ii,
+ &ip->state[i],
+ devnr, type, code, value);
+ if (tmp_ret)
+ ret = tmp_ret;
+ }
+ }
+ return ret;
+}
+
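+/*
+ * Run every gpio_event_info handler. INIT and RESUME walk forward; any
+ * other func (and a failure partway through the forward walk) unwinds
+ * backwards, invoking each handler with func & ~1 -- presumably the
+ * inverse operation paired with it in the gpio_event function codes.
+ */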
+static int gpio_event_call_all_func(struct gpio_event *ip, int func)
+{
+ int i;
+ int ret;
+ struct gpio_event_info **ii;
+
+ if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) {
+ ii = ip->info->info;
+ for (i = 0; i < ip->info->info_count; i++, ii++) {
+ if ((*ii)->func == NULL) {
+ ret = -ENODEV;
+ pr_err("gpio_event_probe: Incomplete pdata, "
+ "no function\n");
+ goto err_no_func;
+ }
+ if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend)
+ continue;
+ ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i],
+ func);
+ if (ret) {
+ pr_err("gpio_event_probe: function failed\n");
+ goto err_func_failed;
+ }
+ }
+ return 0;
+ }
+
+ ret = 0;
+ i = ip->info->info_count;
+ ii = ip->info->info + i;
+ while (i > 0) {
+ i--;
+ ii--;
+ if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend)
+ continue;
+ (*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1);
+err_func_failed:
+err_no_func:
+ ;
+ }
+ return ret;
+}
+
+static void __maybe_unused gpio_event_suspend(struct gpio_event *ip)
+{
+ gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND);
+ if (ip->info->power)
+ ip->info->power(ip->info, 0);
+}
+
+static void __maybe_unused gpio_event_resume(struct gpio_event *ip)
+{
+ if (ip->info->power)
+ ip->info->power(ip->info, 1);
+ gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME);
+}
+
+static int gpio_event_probe(struct platform_device *pdev)
+{
+ int err;
+ struct gpio_event *ip;
+ struct gpio_event_platform_data *event_info;
+ int dev_count = 1;
+ int i;
+ int registered = 0;
+
+ event_info = pdev->dev.platform_data;
+ if (event_info == NULL) {
+ pr_err("gpio_event_probe: No pdata\n");
+ return -ENODEV;
+ }
+ if ((!event_info->name && !event_info->names[0]) ||
+ !event_info->info || !event_info->info_count) {
+ pr_err("gpio_event_probe: Incomplete pdata\n");
+ return -ENODEV;
+ }
+ if (!event_info->name)
+ while (event_info->names[dev_count])
+ dev_count++;
+ ip = kzalloc(sizeof(*ip) +
+ sizeof(ip->state[0]) * event_info->info_count +
+ sizeof(*ip->input_devs) +
+ sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL);
+ if (ip == NULL) {
+ err = -ENOMEM;
+ pr_err("gpio_event_probe: Failed to allocate private data\n");
+ goto err_kp_alloc_failed;
+ }
+ ip->input_devs = (void *)&ip->state[event_info->info_count];
+ platform_set_drvdata(pdev, ip);
+
+ for (i = 0; i < dev_count; i++) {
+ struct input_dev *input_dev = input_allocate_device();
+ if (input_dev == NULL) {
+ err = -ENOMEM;
+ pr_err("gpio_event_probe: "
+ "Failed to allocate input device\n");
+ goto err_input_dev_alloc_failed;
+ }
+ input_set_drvdata(input_dev, ip);
+ input_dev->name = event_info->name ?
+ event_info->name : event_info->names[i];
+ input_dev->event = gpio_input_event;
+ ip->input_devs->dev[i] = input_dev;
+ }
+ ip->input_devs->count = dev_count;
+ ip->info = event_info;
+ if (event_info->power)
+ ip->info->power(ip->info, 1);
+
+ err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT);
+ if (err)
+ goto err_call_all_func_failed;
+
+ for (i = 0; i < dev_count; i++) {
+ err = input_register_device(ip->input_devs->dev[i]);
+ if (err) {
+ pr_err("gpio_event_probe: Unable to register %s "
+ "input device\n", ip->input_devs->dev[i]->name);
+ goto err_input_register_device_failed;
+ }
+ registered++;
+ }
+
+ return 0;
+
+err_input_register_device_failed:
+ gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+err_call_all_func_failed:
+ if (event_info->power)
+ ip->info->power(ip->info, 0);
+ for (i = 0; i < registered; i++)
+ input_unregister_device(ip->input_devs->dev[i]);
+ for (i = dev_count - 1; i >= registered; i--) {
+ input_free_device(ip->input_devs->dev[i]);
+err_input_dev_alloc_failed:
+ ;
+ }
+ kfree(ip);
+err_kp_alloc_failed:
+ return err;
+}
+
+static int gpio_event_remove(struct platform_device *pdev)
+{
+ struct gpio_event *ip = platform_get_drvdata(pdev);
+ int i;
+
+ gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+ if (ip->info->power)
+ ip->info->power(ip->info, 0);
+ for (i = 0; i < ip->input_devs->count; i++)
+ input_unregister_device(ip->input_devs->dev[i]);
+ kfree(ip);
+ return 0;
+}
+
+static struct platform_driver gpio_event_driver = {
+ .probe = gpio_event_probe,
+ .remove = gpio_event_remove,
+ .driver = {
+ .name = GPIO_EVENT_DEV_NAME,
+ },
+};
+
+module_platform_driver(gpio_event_driver);
+
+MODULE_DESCRIPTION("GPIO Event Driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/input/misc/gpio_input.c b/drivers/input/misc/gpio_input.c
new file mode 100644
index 00000000000..eefd02725af
--- /dev/null
+++ b/drivers/input/misc/gpio_input.c
@@ -0,0 +1,390 @@
+/* drivers/input/misc/gpio_input.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/pm_wakeup.h>
+
+enum {
+ DEBOUNCE_UNSTABLE = BIT(0), /* Got irq while debouncing */
+ DEBOUNCE_PRESSED = BIT(1),
+ DEBOUNCE_NOTPRESSED = BIT(2),
+ DEBOUNCE_WAIT_IRQ = BIT(3), /* Stable irq state */
+ DEBOUNCE_POLL = BIT(4), /* Stable polling state */
+
+ DEBOUNCE_UNKNOWN =
+ DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED,
+};
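+
+/*
+ * Debounce flow: an edge irq moves a stable key to DEBOUNCE_UNKNOWN and
+ * kicks the timer; the timer must then sample the same level on two
+ * consecutive runs before the event is reported and the key parks in
+ * DEBOUNCE_WAIT_IRQ again (or DEBOUNCE_POLL when irqs are unavailable).
+ */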
+
+struct gpio_key_state {
+ struct gpio_input_state *ds;
+ uint8_t debounce;
+};
+
+struct gpio_input_state {
+ struct gpio_event_input_devs *input_devs;
+ const struct gpio_event_input_info *info;
+ struct hrtimer timer;
+ int use_irq;
+ int debounce_count;
+ spinlock_t irq_lock;
+ struct wakeup_source *ws;
+ struct gpio_key_state key_state[0];
+};
+
+static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer)
+{
+ int i;
+ int pressed;
+ struct gpio_input_state *ds =
+ container_of(timer, struct gpio_input_state, timer);
+ unsigned gpio_flags = ds->info->flags;
+ unsigned npolarity;
+ int nkeys = ds->info->keymap_size;
+ const struct gpio_event_direct_entry *key_entry;
+ struct gpio_key_state *key_state;
+ unsigned long irqflags;
+ uint8_t debounce;
+ bool sync_needed;
+
+#if 0
+ key_entry = kp->keys_info->keymap;
+ key_state = kp->key_state;
+ for (i = 0; i < nkeys; i++, key_entry++, key_state++)
+ pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+ gpio_read_detect_status(key_entry->gpio));
+#endif
+ key_entry = ds->info->keymap;
+ key_state = ds->key_state;
+ sync_needed = false;
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+ debounce = key_state->debounce;
+ if (debounce & DEBOUNCE_WAIT_IRQ)
+ continue;
+ if (key_state->debounce & DEBOUNCE_UNSTABLE) {
+ debounce = key_state->debounce = DEBOUNCE_UNKNOWN;
+ enable_irq(gpio_to_irq(key_entry->gpio));
+ if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE)
+ pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+ "(%d) continue debounce\n",
+ ds->info->type, key_entry->code,
+ i, key_entry->gpio);
+ }
+ npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH);
+ pressed = gpio_get_value(key_entry->gpio) ^ npolarity;
+ if (debounce & DEBOUNCE_POLL) {
+ if (pressed == !(debounce & DEBOUNCE_PRESSED)) {
+ ds->debounce_count++;
+ key_state->debounce = DEBOUNCE_UNKNOWN;
+ if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+ pr_info("gpio_keys_scan_keys: key %x-"
+ "%x, %d (%d) start debounce\n",
+ ds->info->type, key_entry->code,
+ i, key_entry->gpio);
+ }
+ continue;
+ }
+ if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) {
+ if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+ pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+ "(%d) debounce pressed 1\n",
+ ds->info->type, key_entry->code,
+ i, key_entry->gpio);
+ key_state->debounce = DEBOUNCE_PRESSED;
+ continue;
+ }
+ if (!pressed && (debounce & DEBOUNCE_PRESSED)) {
+ if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+ pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+ "(%d) debounce pressed 0\n",
+ ds->info->type, key_entry->code,
+ i, key_entry->gpio);
+ key_state->debounce = DEBOUNCE_NOTPRESSED;
+ continue;
+ }
+ /* key is stable */
+ ds->debounce_count--;
+ if (ds->use_irq)
+ key_state->debounce |= DEBOUNCE_WAIT_IRQ;
+ else
+ key_state->debounce |= DEBOUNCE_POLL;
+ if (gpio_flags & GPIOEDF_PRINT_KEYS)
+ pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) "
+ "changed to %d\n", ds->info->type,
+ key_entry->code, i, key_entry->gpio, pressed);
+ input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+ key_entry->code, pressed);
+ sync_needed = true;
+ }
+ if (sync_needed) {
+ for (i = 0; i < ds->input_devs->count; i++)
+ input_sync(ds->input_devs->dev[i]);
+ }
+
+#if 0
+ key_entry = kp->keys_info->keymap;
+ key_state = kp->key_state;
+ for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+ pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+ gpio_read_detect_status(key_entry->gpio));
+ }
+#endif
+
+ if (ds->debounce_count)
+ hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL);
+ else if (!ds->use_irq)
+ hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL);
+ else
+ __pm_relax(ds->ws);
+
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_key_state *ks = dev_id;
+ struct gpio_input_state *ds = ks->ds;
+ int keymap_index = ks - ds->key_state;
+ const struct gpio_event_direct_entry *key_entry;
+ unsigned long irqflags;
+ int pressed;
+
+ if (!ds->use_irq)
+ return IRQ_HANDLED;
+
+ key_entry = &ds->info->keymap[keymap_index];
+
+ if (ds->info->debounce_time.tv64) {
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ if (ks->debounce & DEBOUNCE_WAIT_IRQ) {
+ ks->debounce = DEBOUNCE_UNKNOWN;
+ if (ds->debounce_count++ == 0) {
+ __pm_stay_awake(ds->ws);
+ hrtimer_start(
+ &ds->timer, ds->info->debounce_time,
+ HRTIMER_MODE_REL);
+ }
+ if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+ pr_info("gpio_event_input_irq_handler: "
+ "key %x-%x, %d (%d) start debounce\n",
+ ds->info->type, key_entry->code,
+ keymap_index, key_entry->gpio);
+ } else {
+ disable_irq_nosync(irq);
+ ks->debounce = DEBOUNCE_UNSTABLE;
+ }
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+ } else {
+ pressed = gpio_get_value(key_entry->gpio) ^
+ !(ds->info->flags & GPIOEDF_ACTIVE_HIGH);
+ if (ds->info->flags & GPIOEDF_PRINT_KEYS)
+ pr_info("gpio_event_input_irq_handler: key %x-%x, %d "
+ "(%d) changed to %d\n",
+ ds->info->type, key_entry->code, keymap_index,
+ key_entry->gpio, pressed);
+ input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+ key_entry->code, pressed);
+ input_sync(ds->input_devs->dev[key_entry->dev]);
+ }
+ return IRQ_HANDLED;
+}
+
+static int gpio_event_input_request_irqs(struct gpio_input_state *ds)
+{
+ int i;
+ int err;
+ unsigned int irq;
+ unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+
+ for (i = 0; i < ds->info->keymap_size; i++) {
+ err = irq = gpio_to_irq(ds->info->keymap[i].gpio);
+ if (err < 0)
+ goto err_gpio_get_irq_num_failed;
+ err = request_irq(irq, gpio_event_input_irq_handler,
+ req_flags, "gpio_keys", &ds->key_state[i]);
+ if (err) {
+ pr_err("gpio_event_input_request_irqs: request_irq "
+ "failed for input %d, irq %d\n",
+ ds->info->keymap[i].gpio, irq);
+ goto err_request_irq_failed;
+ }
+ if (ds->info->info.no_suspend) {
+ err = enable_irq_wake(irq);
+ if (err) {
+ pr_err("gpio_event_input_request_irqs: "
+ "enable_irq_wake failed for input %d, "
+ "irq %d\n",
+ ds->info->keymap[i].gpio, irq);
+ goto err_enable_irq_wake_failed;
+ }
+ }
+ }
+ return 0;
+
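+ /* error unwind: reachable only via the goto labels below */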
+ for (i = ds->info->keymap_size - 1; i >= 0; i--) {
+ irq = gpio_to_irq(ds->info->keymap[i].gpio);
+ if (ds->info->info.no_suspend)
+ disable_irq_wake(irq);
+err_enable_irq_wake_failed:
+ free_irq(irq, &ds->key_state[i]);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+ ;
+ }
+ return err;
+}
+
+int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func)
+{
+ int ret;
+ int i;
+ unsigned long irqflags;
+ struct gpio_event_input_info *di;
+ struct gpio_input_state *ds = *data;
+ char *wlname;
+
+ di = container_of(info, struct gpio_event_input_info, info);
+
+ if (func == GPIO_EVENT_FUNC_SUSPEND) {
+ if (ds->use_irq)
+ for (i = 0; i < di->keymap_size; i++)
+ disable_irq(gpio_to_irq(di->keymap[i].gpio));
+ hrtimer_cancel(&ds->timer);
+ return 0;
+ }
+ if (func == GPIO_EVENT_FUNC_RESUME) {
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ if (ds->use_irq)
+ for (i = 0; i < di->keymap_size; i++)
+ enable_irq(gpio_to_irq(di->keymap[i].gpio));
+ hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+ return 0;
+ }
+
+ if (func == GPIO_EVENT_FUNC_INIT) {
+ if (ktime_to_ns(di->poll_time) <= 0)
+ di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC);
+
+ *data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) *
+ di->keymap_size, GFP_KERNEL);
+ if (ds == NULL) {
+ ret = -ENOMEM;
+ pr_err("gpio_event_input_func: "
+ "Failed to allocate private data\n");
+ goto err_ds_alloc_failed;
+ }
+ ds->debounce_count = di->keymap_size;
+ ds->input_devs = input_devs;
+ ds->info = di;
+ wlname = kasprintf(GFP_KERNEL, "gpio_input:%s%s",
+ input_devs->dev[0]->name,
+ (input_devs->count > 1) ? "..." : "");
+
+ ds->ws = wakeup_source_register(wlname);
+ kfree(wlname);
+ if (!ds->ws) {
+ ret = -ENOMEM;
+ pr_err("gpio_event_input_func: "
+ "Failed to allocate wakeup source\n");
+ goto err_ws_failed;
+ }
+
+ spin_lock_init(&ds->irq_lock);
+
+ for (i = 0; i < di->keymap_size; i++) {
+ int dev = di->keymap[i].dev;
+ if (dev >= input_devs->count) {
+ pr_err("gpio_event_input_func: bad device "
+ "index %d >= %d for key code %d\n",
+ dev, input_devs->count,
+ di->keymap[i].code);
+ ret = -EINVAL;
+ goto err_bad_keymap;
+ }
+ input_set_capability(input_devs->dev[dev], di->type,
+ di->keymap[i].code);
+ ds->key_state[i].ds = ds;
+ ds->key_state[i].debounce = DEBOUNCE_UNKNOWN;
+ }
+
+ for (i = 0; i < di->keymap_size; i++) {
+ ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in");
+ if (ret) {
+ pr_err("gpio_event_input_func: gpio_request "
+ "failed for %d\n", di->keymap[i].gpio);
+ goto err_gpio_request_failed;
+ }
+ ret = gpio_direction_input(di->keymap[i].gpio);
+ if (ret) {
+ pr_err("gpio_event_input_func: "
+ "gpio_direction_input failed for %d\n",
+ di->keymap[i].gpio);
+ goto err_gpio_configure_failed;
+ }
+ }
+
+ ret = gpio_event_input_request_irqs(ds);
+
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ ds->use_irq = ret == 0;
+
+ pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s "
+ "mode\n", input_devs->dev[0]->name,
+ (input_devs->count > 1) ? "..." : "",
+ ret == 0 ? "interrupt" : "polling");
+
+ hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ds->timer.function = gpio_event_input_timer_func;
+ hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+ return 0;
+ }
+
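+ /*
+ * GPIO_EVENT_FUNC_UNINIT: cancel the timer and release irqs and gpios
+ * in reverse order; init failures also unwind through these labels.
+ */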
+ ret = 0;
+ spin_lock_irqsave(&ds->irq_lock, irqflags);
+ hrtimer_cancel(&ds->timer);
+ if (ds->use_irq) {
+ for (i = di->keymap_size - 1; i >= 0; i--) {
+ int irq = gpio_to_irq(di->keymap[i].gpio);
+ if (ds->info->info.no_suspend)
+ disable_irq_wake(irq);
+ free_irq(irq, &ds->key_state[i]);
+ }
+ }
+ spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+ for (i = di->keymap_size - 1; i >= 0; i--) {
+err_gpio_configure_failed:
+ gpio_free(di->keymap[i].gpio);
+err_gpio_request_failed:
+ ;
+ }
+err_bad_keymap:
+ wakeup_source_unregister(ds->ws);
+err_ws_failed:
+ kfree(ds);
+err_ds_alloc_failed:
+ return ret;
+}
diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c
new file mode 100644
index 00000000000..eaa9e89d473
--- /dev/null
+++ b/drivers/input/misc/gpio_matrix.c
@@ -0,0 +1,441 @@
+/* drivers/input/misc/gpio_matrix.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+struct gpio_kp {
+ struct gpio_event_input_devs *input_devs;
+ struct gpio_event_matrix_info *keypad_info;
+ struct hrtimer timer;
+ struct wake_lock wake_lock;
+ int current_output;
+ unsigned int use_irq:1;
+ unsigned int key_state_changed:1;
+ unsigned int last_key_state_changed:1;
+ unsigned int some_keys_pressed:2;
+ unsigned int disabled_irq:1;
+ unsigned long keys_pressed[0];
+};
+
+static void clear_phantom_key(struct gpio_kp *kp, int out, int in)
+{
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+ int key_index = out * mi->ninputs + in;
+ unsigned short keyentry = mi->keymap[key_index];
+ unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+ unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+ if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+ if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+ pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+ "cleared\n", keycode, out, in,
+ mi->output_gpios[out], mi->input_gpios[in]);
+ __clear_bit(key_index, kp->keys_pressed);
+ } else {
+ if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+ pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+ "not cleared\n", keycode, out, in,
+ mi->output_gpios[out], mi->input_gpios[in]);
+ }
+}
+
+static int restore_keys_for_input(struct gpio_kp *kp, int out, int in)
+{
+ int rv = 0;
+ int key_index;
+
+ key_index = out * kp->keypad_info->ninputs + in;
+ while (out < kp->keypad_info->noutputs) {
+ if (test_bit(key_index, kp->keys_pressed)) {
+ rv = 1;
+ clear_phantom_key(kp, out, in);
+ }
+ key_index += kp->keypad_info->ninputs;
+ out++;
+ }
+ return rv;
+}
+
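+/*
+ * With three keys held, a passive matrix can "ghost": current through
+ * three pressed corners of a rectangle makes the fourth corner read as
+ * pressed as well. Clear any key that cannot be confirmed, unless it was
+ * already reported as down.
+ */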
+static void remove_phantom_keys(struct gpio_kp *kp)
+{
+ int out, in, inp;
+ int key_index;
+
+ if (kp->some_keys_pressed < 3)
+ return;
+
+ for (out = 0; out < kp->keypad_info->noutputs; out++) {
+ inp = -1;
+ key_index = out * kp->keypad_info->ninputs;
+ for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) {
+ if (test_bit(key_index, kp->keys_pressed)) {
+ if (inp == -1) {
+ inp = in;
+ continue;
+ }
+ if (inp >= 0) {
+ if (!restore_keys_for_input(kp, out + 1,
+ inp))
+ break;
+ clear_phantom_key(kp, out, inp);
+ inp = -2;
+ }
+ restore_keys_for_input(kp, out, in);
+ }
+ }
+ }
+}
+
+static void report_key(struct gpio_kp *kp, int key_index, int out, int in)
+{
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+ int pressed = test_bit(key_index, kp->keys_pressed);
+ unsigned short keyentry = mi->keymap[key_index];
+ unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+ unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+ if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+ if (keycode == KEY_RESERVED) {
+ if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS)
+ pr_info("gpiomatrix: unmapped key, %d-%d "
+ "(%d-%d) changed to %d\n",
+ out, in, mi->output_gpios[out],
+ mi->input_gpios[in], pressed);
+ } else {
+ if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS)
+ pr_info("gpiomatrix: key %x, %d-%d (%d-%d) "
+ "changed to %d\n", keycode,
+ out, in, mi->output_gpios[out],
+ mi->input_gpios[in], pressed);
+ input_report_key(kp->input_devs->dev[dev], keycode, pressed);
+ }
+ }
+}
+
+static void report_sync(struct gpio_kp *kp)
+{
+ int i;
+
+ for (i = 0; i < kp->input_devs->count; i++)
+ input_sync(kp->input_devs->dev[i]);
+}
+
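+/*
+ * Matrix scan: each timer run drives one output line active, waits
+ * settle_time, and samples all inputs on the next run. After the last
+ * output the changed keys are (optionally) debounced and reported; the
+ * scan then re-arms for polling, or re-enables the input irqs and sleeps
+ * until the next keypress.
+ */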
+static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
+{
+ int out, in;
+ int key_index;
+ int gpio;
+ struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+ unsigned gpio_keypad_flags = mi->flags;
+ unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);
+
+ out = kp->current_output;
+ if (out == mi->noutputs) {
+ out = 0;
+ kp->last_key_state_changed = kp->key_state_changed;
+ kp->key_state_changed = 0;
+ kp->some_keys_pressed = 0;
+ } else {
+ key_index = out * mi->ninputs;
+ for (in = 0; in < mi->ninputs; in++, key_index++) {
+ gpio = mi->input_gpios[in];
+ if (gpio_get_value(gpio) ^ !polarity) {
+ if (kp->some_keys_pressed < 3)
+ kp->some_keys_pressed++;
+ kp->key_state_changed |= !__test_and_set_bit(
+ key_index, kp->keys_pressed);
+ } else
+ kp->key_state_changed |= __test_and_clear_bit(
+ key_index, kp->keys_pressed);
+ }
+ gpio = mi->output_gpios[out];
+ if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+ gpio_set_value(gpio, !polarity);
+ else
+ gpio_direction_input(gpio);
+ out++;
+ }
+ kp->current_output = out;
+ if (out < mi->noutputs) {
+ gpio = mi->output_gpios[out];
+ if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+ gpio_set_value(gpio, polarity);
+ else
+ gpio_direction_output(gpio, polarity);
+ hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+ }
+ if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
+ if (kp->key_state_changed) {
+ hrtimer_start(&kp->timer, mi->debounce_delay,
+ HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+ }
+ kp->key_state_changed = kp->last_key_state_changed;
+ }
+ if (kp->key_state_changed) {
+ if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
+ remove_phantom_keys(kp);
+ key_index = 0;
+ for (out = 0; out < mi->noutputs; out++)
+ for (in = 0; in < mi->ninputs; in++, key_index++)
+ report_key(kp, key_index, out, in);
+ report_sync(kp);
+ }
+ if (!kp->use_irq || kp->some_keys_pressed) {
+ hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+ }
+
+ /* No keys are pressed, reenable interrupt */
+ for (out = 0; out < mi->noutputs; out++) {
+ if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+ gpio_set_value(mi->output_gpios[out], polarity);
+ else
+ gpio_direction_output(mi->output_gpios[out], polarity);
+ }
+ for (in = 0; in < mi->ninputs; in++)
+ enable_irq(gpio_to_irq(mi->input_gpios[in]));
+ wake_unlock(&kp->wake_lock);
+ return HRTIMER_NORESTART;
+}
+
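+/*
+ * While idle in interrupt mode every output is driven active, so any key
+ * press raises an edge on its input line. On the first interrupt, mask all
+ * input IRQs, return the outputs to the inactive state and hand control to
+ * the scan timer, holding a wakelock until the scan winds down.
+ */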
+static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id)
+{
+ int i;
+ struct gpio_kp *kp = dev_id;
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+ unsigned gpio_keypad_flags = mi->flags;
+
+ if (!kp->use_irq) {
+ /* ignore interrupt while registering the handler */
+ kp->disabled_irq = 1;
+ disable_irq_nosync(irq_in);
+ return IRQ_HANDLED;
+ }
+
+ for (i = 0; i < mi->ninputs; i++)
+ disable_irq_nosync(gpio_to_irq(mi->input_gpios[i]));
+ for (i = 0; i < mi->noutputs; i++) {
+ if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+ gpio_set_value(mi->output_gpios[i],
+ !(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH));
+ else
+ gpio_direction_input(mi->output_gpios[i]);
+ }
+ wake_lock(&kp->wake_lock);
+ hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+ return IRQ_HANDLED;
+}
+
+static int gpio_keypad_request_irqs(struct gpio_kp *kp)
+{
+ int i;
+ int err;
+ unsigned int irq;
+ unsigned long request_flags;
+ struct gpio_event_matrix_info *mi = kp->keypad_info;
+
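+ /* Map the polarity and trigger flags onto an IRQF_TRIGGER_* type. */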
+ switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) {
+ default:
+ request_flags = IRQF_TRIGGER_FALLING;
+ break;
+ case GPIOKPF_ACTIVE_HIGH:
+ request_flags = IRQF_TRIGGER_RISING;
+ break;
+ case GPIOKPF_LEVEL_TRIGGERED_IRQ:
+ request_flags = IRQF_TRIGGER_LOW;
+ break;
+ case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH:
+ request_flags = IRQF_TRIGGER_HIGH;
+ break;
+ }
+
+ for (i = 0; i < mi->ninputs; i++) {
+ err = irq = gpio_to_irq(mi->input_gpios[i]);
+ if (err < 0)
+ goto err_gpio_get_irq_num_failed;
+ err = request_irq(irq, gpio_keypad_irq_handler, request_flags,
+ "gpio_kp", kp);
+ if (err) {
+ pr_err("gpiomatrix: request_irq failed for input %d, "
+ "irq %d\n", mi->input_gpios[i], irq);
+ goto err_request_irq_failed;
+ }
+ err = enable_irq_wake(irq);
+ if (err) {
+ pr_err("gpiomatrix: set_irq_wake failed for input %d, "
+ "irq %d\n", mi->input_gpios[i], irq);
+ }
+ disable_irq(irq);
+ if (kp->disabled_irq) {
+ kp->disabled_irq = 0;
+ enable_irq(irq);
+ }
+ }
+ return 0;
+
+ /* Unwind path: entered only via the labels below with i at the
+ * failing input index, so the loop initializer never runs. */
+ for (i = mi->ninputs - 1; i >= 0; i--) {
+ free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+ ;
+ }
+ return err;
+}
+
+int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func)
+{
+ int i;
+ int err;
+ int key_count;
+ struct gpio_kp *kp;
+ struct gpio_event_matrix_info *mi;
+
+ mi = container_of(info, struct gpio_event_matrix_info, info);
+ if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) {
+ /* TODO: disable scanning */
+ return 0;
+ }
+
+ if (func == GPIO_EVENT_FUNC_INIT) {
+ if (mi->keymap == NULL ||
+ mi->input_gpios == NULL ||
+ mi->output_gpios == NULL) {
+ err = -ENODEV;
+ pr_err("gpiomatrix: Incomplete pdata\n");
+ goto err_invalid_platform_data;
+ }
+ key_count = mi->ninputs * mi->noutputs;
+
+ *data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) *
+ BITS_TO_LONGS(key_count), GFP_KERNEL);
+ if (kp == NULL) {
+ err = -ENOMEM;
+ pr_err("gpiomatrix: Failed to allocate private data\n");
+ goto err_kp_alloc_failed;
+ }
+ kp->input_devs = input_devs;
+ kp->keypad_info = mi;
+ for (i = 0; i < key_count; i++) {
+ unsigned short keyentry = mi->keymap[i];
+ unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+ unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+ if (dev >= input_devs->count) {
+ pr_err("gpiomatrix: bad device index %d >= "
+ "%d for key code %d\n",
+ dev, input_devs->count, keycode);
+ err = -EINVAL;
+ goto err_bad_keymap;
+ }
+ if (keycode && keycode <= KEY_MAX)
+ input_set_capability(input_devs->dev[dev],
+ EV_KEY, keycode);
+ }
+
+ for (i = 0; i < mi->noutputs; i++) {
+ err = gpio_request(mi->output_gpios[i], "gpio_kp_out");
+ if (err) {
+ pr_err("gpiomatrix: gpio_request failed for "
+ "output %d\n", mi->output_gpios[i]);
+ goto err_request_output_gpio_failed;
+ }
+ if (gpio_cansleep(mi->output_gpios[i])) {
+ pr_err("gpiomatrix: unsupported output gpio %d,"
+ " can sleep\n", mi->output_gpios[i]);
+ err = -EINVAL;
+ goto err_output_gpio_configure_failed;
+ }
+ if (mi->flags & GPIOKPF_DRIVE_INACTIVE)
+ err = gpio_direction_output(mi->output_gpios[i],
+ !(mi->flags & GPIOKPF_ACTIVE_HIGH));
+ else
+ err = gpio_direction_input(mi->output_gpios[i]);
+ if (err) {
+ pr_err("gpiomatrix: gpio_configure failed for "
+ "output %d\n", mi->output_gpios[i]);
+ goto err_output_gpio_configure_failed;
+ }
+ }
+ for (i = 0; i < mi->ninputs; i++) {
+ err = gpio_request(mi->input_gpios[i], "gpio_kp_in");
+ if (err) {
+ pr_err("gpiomatrix: gpio_request failed for "
+ "input %d\n", mi->input_gpios[i]);
+ goto err_request_input_gpio_failed;
+ }
+ err = gpio_direction_input(mi->input_gpios[i]);
+ if (err) {
+ pr_err("gpiomatrix: gpio_direction_input failed"
+ " for input %d\n", mi->input_gpios[i]);
+ goto err_gpio_direction_input_failed;
+ }
+ }
+ kp->current_output = mi->noutputs;
+ kp->key_state_changed = 1;
+
+ hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ kp->timer.function = gpio_keypad_timer_func;
+ wake_lock_init(&kp->wake_lock, WAKE_LOCK_SUSPEND, "gpio_kp");
+ err = gpio_keypad_request_irqs(kp);
+ kp->use_irq = err == 0;
+
+ pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for "
+ "%s%s in %s mode\n", input_devs->dev[0]->name,
+ (input_devs->count > 1) ? "..." : "",
+ kp->use_irq ? "interrupt" : "polling");
+
+ if (kp->use_irq)
+ wake_lock(&kp->wake_lock);
+ hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+ return 0;
+ }
+
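+ /*
+ * GPIO_EVENT_FUNC_UNINIT arrives here; the error labels below sit
+ * inside the teardown loops so a failed init resumes teardown exactly
+ * where setup stopped.
+ */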
+ err = 0;
+ kp = *data;
+
+ if (kp->use_irq)
+ for (i = mi->ninputs - 1; i >= 0; i--)
+ free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+
+ hrtimer_cancel(&kp->timer);
+ wake_lock_destroy(&kp->wake_lock);
+ for (i = mi->ninputs - 1; i >= 0; i--) {
+err_gpio_direction_input_failed:
+ gpio_free(mi->input_gpios[i]);
+err_request_input_gpio_failed:
+ ;
+ }
+ for (i = mi->noutputs - 1; i >= 0; i--) {
+err_output_gpio_configure_failed:
+ gpio_free(mi->output_gpios[i]);
+err_request_output_gpio_failed:
+ ;
+ }
+err_bad_keymap:
+ kfree(kp);
+err_kp_alloc_failed:
+err_invalid_platform_data:
+ return err;
+}
diff --git a/drivers/input/misc/gpio_output.c b/drivers/input/misc/gpio_output.c
new file mode 100644
index 00000000000..2aac2fad0a1
--- /dev/null
+++ b/drivers/input/misc/gpio_output.c
@@ -0,0 +1,97 @@
+/* drivers/input/misc/gpio_output.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+
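+/*
+ * Mirror matching input events onto GPIO lines: an event whose type,
+ * device index and code match a keymap entry drives the mapped GPIO to
+ * the event value, inverted when the line is active-low.
+ */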
+int gpio_event_output_event(
+ struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+ void **data, unsigned int dev, unsigned int type,
+ unsigned int code, int value)
+{
+ int i;
+ struct gpio_event_output_info *oi;
+ oi = container_of(info, struct gpio_event_output_info, info);
+ if (type != oi->type)
+ return 0;
+ if (!(oi->flags & GPIOEDF_ACTIVE_HIGH))
+ value = !value;
+ for (i = 0; i < oi->keymap_size; i++)
+ if (dev == oi->keymap[i].dev && code == oi->keymap[i].code)
+ gpio_set_value(oi->keymap[i].gpio, value);
+ return 0;
+}
+
+int gpio_event_output_func(
+ struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+ void **data, int func)
+{
+ int ret;
+ int i;
+ struct gpio_event_output_info *oi;
+ oi = container_of(info, struct gpio_event_output_info, info);
+
+ if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME)
+ return 0;
+
+ if (func == GPIO_EVENT_FUNC_INIT) {
+ int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH);
+
+ for (i = 0; i < oi->keymap_size; i++) {
+ int dev = oi->keymap[i].dev;
+ if (dev >= input_devs->count) {
+ pr_err("gpio_event_output_func: bad device "
+ "index %d >= %d for key code %d\n",
+ dev, input_devs->count,
+ oi->keymap[i].code);
+ ret = -EINVAL;
+ goto err_bad_keymap;
+ }
+ input_set_capability(input_devs->dev[dev], oi->type,
+ oi->keymap[i].code);
+ }
+
+ for (i = 0; i < oi->keymap_size; i++) {
+ ret = gpio_request(oi->keymap[i].gpio,
+ "gpio_event_output");
+ if (ret) {
+ pr_err("gpio_event_output_func: gpio_request "
+ "failed for %d\n", oi->keymap[i].gpio);
+ goto err_gpio_request_failed;
+ }
+ ret = gpio_direction_output(oi->keymap[i].gpio,
+ output_level);
+ if (ret) {
+ pr_err("gpio_event_output_func: "
+ "gpio_direction_output failed for %d\n",
+ oi->keymap[i].gpio);
+ goto err_gpio_direction_output_failed;
+ }
+ }
+ return 0;
+ }
+
+ ret = 0;
+ for (i = oi->keymap_size - 1; i >= 0; i--) {
+err_gpio_direction_output_failed:
+ gpio_free(oi->keymap[i].gpio);
+err_gpio_request_failed:
+ ;
+ }
+err_bad_keymap:
+ return ret;
+}
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c
new file mode 100644
index 00000000000..a5ea27ad0e1
--- /dev/null
+++ b/drivers/input/misc/keychord.c
@@ -0,0 +1,391 @@
+/*
+ * drivers/input/misc/keychord.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/keychord.h>
+#include <linux/sched.h>
+
+#define KEYCHORD_NAME "keychord"
+#define BUFFER_SIZE 16
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Key chord input driver");
+MODULE_SUPPORTED_DEVICE("keychord");
+MODULE_LICENSE("GPL");
+
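+/*
+ * Keychords arrive from userspace as variable-length records (a header
+ * followed by "count" keycodes) packed back to back; step over the current
+ * record to reach the next one.
+ */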
+#define NEXT_KEYCHORD(kc) ((struct input_keychord *) \
+ ((char *)kc + sizeof(struct input_keychord) + \
+ kc->count * sizeof(kc->keycodes[0])))
+
+struct keychord_device {
+ struct input_handler input_handler;
+ int registered;
+
+ /* list of keychords to monitor */
+ struct input_keychord *keychords;
+ int keychord_count;
+
+ /* bitmask of keys contained in our keychords */
+ unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+ /* current state of the keys */
+ unsigned long keystate[BITS_TO_LONGS(KEY_CNT)];
+ /* number of keys that are currently pressed */
+ int key_down;
+
+ /* second input_device_id is needed for null termination */
+ struct input_device_id device_ids[2];
+
+ spinlock_t lock;
+ wait_queue_head_t waitq;
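+ /* circular buffer of triggered keychord ids, drained by keychord_read */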
+ unsigned char head;
+ unsigned char tail;
+ __u16 buff[BUFFER_SIZE];
+};
+
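+/*
+ * A chord matches only when exactly its keys are down: the number of keys
+ * currently pressed must equal the chord size and every keycode of the
+ * chord must be set in the current key state.
+ */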
+static int check_keychord(struct keychord_device *kdev,
+ struct input_keychord *keychord)
+{
+ int i;
+
+ if (keychord->count != kdev->key_down)
+ return 0;
+
+ for (i = 0; i < keychord->count; i++) {
+ if (!test_bit(keychord->keycodes[i], kdev->keystate))
+ return 0;
+ }
+
+ /* we have a match */
+ return 1;
+}
+
+static void keychord_event(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ struct keychord_device *kdev = handle->private;
+ struct input_keychord *keychord;
+ unsigned long flags;
+ int i, got_chord = 0;
+
+ if (type != EV_KEY || code >= KEY_MAX)
+ return;
+
+ spin_lock_irqsave(&kdev->lock, flags);
+ /* do nothing if key state did not change */
+ if (!test_bit(code, kdev->keystate) == !value)
+ goto done;
+ __change_bit(code, kdev->keystate);
+ if (value)
+ kdev->key_down++;
+ else
+ kdev->key_down--;
+
+ /* don't notify on key up */
+ if (!value)
+ goto done;
+ /* ignore this event if it is not one of the keys we are monitoring */
+ if (!test_bit(code, kdev->keybit))
+ goto done;
+
+ keychord = kdev->keychords;
+ if (!keychord)
+ goto done;
+
+ /* check to see if the keyboard state matches any keychords */
+ for (i = 0; i < kdev->keychord_count; i++) {
+ if (check_keychord(kdev, keychord)) {
+ kdev->buff[kdev->head] = keychord->id;
+ kdev->head = (kdev->head + 1) % BUFFER_SIZE;
+ got_chord = 1;
+ break;
+ }
+ /* skip to next keychord */
+ keychord = NEXT_KEYCHORD(keychord);
+ }
+
+done:
+ spin_unlock_irqrestore(&kdev->lock, flags);
+
+ if (got_chord) {
+ pr_info("keychord: got keychord id %d. Any tasks: %d\n",
+ keychord->id,
+ !list_empty_careful(&kdev->waitq.task_list));
+ wake_up_interruptible(&kdev->waitq);
+ }
+}
+
+static int keychord_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ int i, ret;
+ struct input_handle *handle;
+ struct keychord_device *kdev =
+ container_of(handler, struct keychord_device, input_handler);
+
+ /*
+ * ignore this input device if it does not contain any keycodes
+ * that we are monitoring
+ */
+ for (i = 0; i < KEY_MAX; i++) {
+ if (test_bit(i, kdev->keybit) && test_bit(i, dev->keybit))
+ break;
+ }
+ if (i == KEY_MAX)
+ return -ENODEV;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = KEYCHORD_NAME;
+ handle->private = kdev;
+
+ ret = input_register_handle(handle);
+ if (ret)
+ goto err_input_register_handle;
+
+ ret = input_open_device(handle);
+ if (ret)
+ goto err_input_open_device;
+
+ pr_info("keychord: using input dev %s for fevent\n", dev->name);
+
+ return 0;
+
+err_input_open_device:
+ input_unregister_handle(handle);
+err_input_register_handle:
+ kfree(handle);
+ return ret;
+}
+
+static void keychord_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+/*
+ * keychord_read is used to read keychord events from the driver
+ */
+static ssize_t keychord_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct keychord_device *kdev = file->private_data;
+ __u16 id;
+ int retval;
+ unsigned long flags;
+
+ if (count < sizeof(id))
+ return -EINVAL;
+ count = sizeof(id);
+
+ if (kdev->head == kdev->tail && (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ retval = wait_event_interruptible(kdev->waitq,
+ kdev->head != kdev->tail);
+ if (retval)
+ return retval;
+
+ spin_lock_irqsave(&kdev->lock, flags);
+ /* pop a keychord ID off the queue */
+ id = kdev->buff[kdev->tail];
+ kdev->tail = (kdev->tail + 1) % BUFFER_SIZE;
+ spin_unlock_irqrestore(&kdev->lock, flags);
+
+ if (copy_to_user(buffer, &id, count))
+ return -EFAULT;
+
+ return count;
+}
+
+/*
+ * keychord_write is used to configure the driver
+ */
+static ssize_t keychord_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct keychord_device *kdev = file->private_data;
+ struct input_keychord *keychords = NULL;
+ struct input_keychord *keychord, *next, *end;
+ int ret, i, key;
+ unsigned long flags;
+
+ if (count < sizeof(struct input_keychord))
+ return -EINVAL;
+ keychords = kzalloc(count, GFP_KERNEL);
+ if (!keychords)
+ return -ENOMEM;
+
+ /* read list of keychords from userspace */
+ if (copy_from_user(keychords, buffer, count)) {
+ kfree(keychords);
+ return -EFAULT;
+ }
+
+ /* unregister handler before changing configuration */
+ if (kdev->registered) {
+ input_unregister_handler(&kdev->input_handler);
+ kdev->registered = 0;
+ }
+
+ spin_lock_irqsave(&kdev->lock, flags);
+ /* clear any existing configuration */
+ kfree(kdev->keychords);
+ kdev->keychords = NULL;
+ kdev->keychord_count = 0;
+ kdev->key_down = 0;
+ memset(kdev->keybit, 0, sizeof(kdev->keybit));
+ memset(kdev->keystate, 0, sizeof(kdev->keystate));
+ kdev->head = kdev->tail = 0;
+
+ keychord = keychords;
+ end = (struct input_keychord *)((char *)keychord + count);
+
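+ /* Validate each record and collect the union of monitored keycodes. */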
+ while (keychord < end) {
+ next = NEXT_KEYCHORD(keychord);
+ if (!keychord->count || next > end) {
+ pr_err("keychord: invalid keycode count %d\n",
+ keychord->count);
+ goto err_unlock_return;
+ }
+ if (keychord->version != KEYCHORD_VERSION) {
+ pr_err("keychord: unsupported version %d\n",
+ keychord->version);
+ goto err_unlock_return;
+ }
+
+ /* keep track of the keys we are monitoring in keybit */
+ for (i = 0; i < keychord->count; i++) {
+ key = keychord->keycodes[i];
+ if (key < 0 || key >= KEY_CNT) {
+ pr_err("keychord: keycode %d out of range\n",
+ key);
+ goto err_unlock_return;
+ }
+ __set_bit(key, kdev->keybit);
+ }
+
+ kdev->keychord_count++;
+ keychord = next;
+ }
+
+ kdev->keychords = keychords;
+ spin_unlock_irqrestore(&kdev->lock, flags);
+
+ ret = input_register_handler(&kdev->input_handler);
+ if (ret) {
+ kfree(keychords);
+ kdev->keychords = NULL;
+ return ret;
+ }
+ kdev->registered = 1;
+
+ return count;
+
+err_unlock_return:
+ spin_unlock_irqrestore(&kdev->lock, flags);
+ kfree(keychords);
+ return -EINVAL;
+}
+
+static unsigned int keychord_poll(struct file *file, poll_table *wait)
+{
+ struct keychord_device *kdev = file->private_data;
+
+ poll_wait(file, &kdev->waitq, wait);
+
+ if (kdev->head != kdev->tail)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static int keychord_open(struct inode *inode, struct file *file)
+{
+ struct keychord_device *kdev;
+
+ kdev = kzalloc(sizeof(struct keychord_device), GFP_KERNEL);
+ if (!kdev)
+ return -ENOMEM;
+
+ spin_lock_init(&kdev->lock);
+ init_waitqueue_head(&kdev->waitq);
+
+ kdev->input_handler.event = keychord_event;
+ kdev->input_handler.connect = keychord_connect;
+ kdev->input_handler.disconnect = keychord_disconnect;
+ kdev->input_handler.name = KEYCHORD_NAME;
+ kdev->input_handler.id_table = kdev->device_ids;
+
+ kdev->device_ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT;
+ __set_bit(EV_KEY, kdev->device_ids[0].evbit);
+
+ file->private_data = kdev;
+
+ return 0;
+}
+
+static int keychord_release(struct inode *inode, struct file *file)
+{
+ struct keychord_device *kdev = file->private_data;
+
+ if (kdev->registered)
+ input_unregister_handler(&kdev->input_handler);
+ kfree(kdev);
+
+ return 0;
+}
+
+static const struct file_operations keychord_fops = {
+ .owner = THIS_MODULE,
+ .open = keychord_open,
+ .release = keychord_release,
+ .read = keychord_read,
+ .write = keychord_write,
+ .poll = keychord_poll,
+};
+
+static struct miscdevice keychord_misc = {
+ .fops = &keychord_fops,
+ .name = KEYCHORD_NAME,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+static int __init keychord_init(void)
+{
+ return misc_register(&keychord_misc);
+}
+
+static void __exit keychord_exit(void)
+{
+ misc_deregister(&keychord_misc);
+}
+
+module_init(keychord_init);
+module_exit(keychord_exit);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index c002d8660e3..3f5743424ff 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -424,6 +424,10 @@ config TI_DAC7512
This driver can also be built as a module. If so, the module
will be called ti_dac7512.
+config UID_STAT
+ bool "UID based statistics tracking exported to /proc/uid_stat"
+ default n
+
config VMWARE_BALLOON
tristate "VMware Balloon Driver"
depends on X86 && HYPERVISOR_GUEST
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c235d5b6831..a57666cec34 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
obj-$(CONFIG_DS1682) += ds1682.o
obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
+obj-$(CONFIG_UID_STAT) += uid_stat.o
obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
diff --git a/drivers/misc/uid_stat.c b/drivers/misc/uid_stat.c
new file mode 100644
index 00000000000..4766c1f83b9
--- /dev/null
+++ b/drivers/misc/uid_stat.c
@@ -0,0 +1,152 @@
+/* drivers/misc/uid_stat.c
+ *
+ * Copyright (C) 2008 - 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/atomic.h>
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/uid_stat.h>
+#include <net/activity_stats.h>
+
+static DEFINE_SPINLOCK(uid_lock);
+static LIST_HEAD(uid_list);
+static struct proc_dir_entry *parent;
+
+struct uid_stat {
+ struct list_head link;
+ uid_t uid;
+ atomic_t tcp_rcv;
+ atomic_t tcp_snd;
+};
+
+static struct uid_stat *find_uid_stat(uid_t uid)
+{
+ struct uid_stat *entry;
+
+ list_for_each_entry(entry, &uid_list, link) {
+ if (entry->uid == uid) {
+ return entry;
+ }
+ }
+ return NULL;
+}
+
+static int uid_stat_atomic_int_show(struct seq_file *m, void *v)
+{
+ unsigned int bytes;
+ atomic_t *counter = m->private;
+
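+ /*
+ * Counters start at INT_MIN, so adding INT_MIN back (equivalent to
+ * subtracting it in two's complement) yields the byte count as a full
+ * unsigned 32-bit value.
+ */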
+ bytes = (unsigned int) (atomic_read(counter) + INT_MIN);
+ return seq_printf(m, "%u\n", bytes);
+}
+
+static int uid_stat_read_atomic_int_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uid_stat_atomic_int_show, PDE_DATA(inode));
+}
+
+static const struct file_operations uid_stat_read_atomic_int_fops = {
+ .open = uid_stat_read_atomic_int_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/* Create a new entry for tracking the specified uid. */
+static struct uid_stat *create_stat(uid_t uid)
+{
+ struct uid_stat *new_uid;
+ /* Create the uid stat struct and append it to the list. */
+ new_uid = kmalloc(sizeof(struct uid_stat), GFP_ATOMIC);
+ if (!new_uid)
+ return NULL;
+
+ new_uid->uid = uid;
+ /* Counters start at INT_MIN, so we can track 4GB of network traffic. */
+ atomic_set(&new_uid->tcp_rcv, INT_MIN);
+ atomic_set(&new_uid->tcp_snd, INT_MIN);
+
+ list_add_tail(&new_uid->link, &uid_list);
+ return new_uid;
+}
+
+static void create_stat_proc(struct uid_stat *new_uid)
+{
+ char uid_s[32];
+ struct proc_dir_entry *entry;
+ snprintf(uid_s, sizeof(uid_s), "%u", new_uid->uid);
+ entry = proc_mkdir(uid_s, parent);
+
+ /* Keep reference to uid_stat so we know what uid to read stats from. */
+ proc_create_data("tcp_snd", S_IRUGO, entry,
+ &uid_stat_read_atomic_int_fops, &new_uid->tcp_snd);
+
+ proc_create_data("tcp_rcv", S_IRUGO, entry,
+ &uid_stat_read_atomic_int_fops, &new_uid->tcp_rcv);
+}
+
+static struct uid_stat *find_or_create_uid_stat(uid_t uid)
+{
+ struct uid_stat *entry;
+ unsigned long flags;
+ spin_lock_irqsave(&uid_lock, flags);
+ entry = find_uid_stat(uid);
+ if (entry) {
+ spin_unlock_irqrestore(&uid_lock, flags);
+ return entry;
+ }
+ entry = create_stat(uid);
+ spin_unlock_irqrestore(&uid_lock, flags);
+ if (entry)
+ create_stat_proc(entry);
+ return entry;
+}
+
+int uid_stat_tcp_snd(uid_t uid, int size)
+{
+ struct uid_stat *entry;
+ activity_stats_update();
+ entry = find_or_create_uid_stat(uid);
+ if (!entry)
+ return -1;
+ atomic_add(size, &entry->tcp_snd);
+ return 0;
+}
+
+int uid_stat_tcp_rcv(uid_t uid, int size)
+{
+ struct uid_stat *entry;
+ activity_stats_update();
+ entry = find_or_create_uid_stat(uid);
+ if (!entry)
+ return -1;
+ atomic_add(size, &entry->tcp_rcv);
+ return 0;
+}
+
+static int __init uid_stat_init(void)
+{
+ parent = proc_mkdir("uid_stat", NULL);
+ if (!parent) {
+ pr_err("uid_stat: failed to create proc entry\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+__initcall(uid_stat_init);
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 5562308699b..79d82124413 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -50,6 +50,15 @@ config MMC_BLOCK_BOUNCE
If unsure, say Y here.
+config MMC_BLOCK_DEFERRED_RESUME
+ bool "Deferr MMC layer resume until I/O is requested"
+ depends on MMC_BLOCK
+ default n
+ help
+ Say Y here to enable deferred MMC resume until I/O
+ is requested. This will reduce overall resume latency and
+ save power when there's an SD card inserted but not being used.
+
config SDIO_UART
tristate "SDIO UART/GPS class support"
depends on TTY
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 9aca9462a12..9cf08651d2b 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -35,6 +35,9 @@
#include <linux/capability.h>
#include <linux/compat.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/mmc.h>
+
#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -163,11 +166,7 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
static inline int mmc_get_devidx(struct gendisk *disk)
{
- int devmaj = MAJOR(disk_devt(disk));
- int devidx = MINOR(disk_devt(disk)) / perdev_minors;
-
- if (!devmaj)
- devidx = disk->first_minor / perdev_minors;
+ int devidx = disk->first_minor / perdev_minors;
return devidx;
}
@@ -728,18 +727,22 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
req->rq_disk->disk_name, "timed out", name, status);
/* If the status cmd initially failed, retry the r/w cmd */
- if (!status_valid)
+ if (!status_valid) {
+ pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
return ERR_RETRY;
-
+ }
/*
* If it was a r/w cmd crc error, or illegal command
* (eg, issued in wrong state) then retry - we should
* have corrected the state problem above.
*/
- if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+ pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
return ERR_RETRY;
+ }
/* Otherwise abort the command */
+ pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
return ERR_ABORT;
default:
@@ -1019,9 +1022,12 @@ retry:
goto out;
}
- if (mmc_can_sanitize(card))
+ if (mmc_can_sanitize(card)) {
+ trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_SANITIZE_START, 1, 0);
+ trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
+ }
out_retry:
if (err && !mmc_blk_reset(md, card->host, type))
goto retry;
@@ -1933,6 +1939,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
unsigned long flags;
unsigned int cmd_flags = req ? req->cmd_flags : 0;
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ if (mmc_bus_needs_resume(card->host))
+ mmc_resume_bus(card->host);
+#endif
+
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
mmc_claim_host(card->host);
@@ -2055,6 +2066,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->disk->queue = md->queue.queue;
md->disk->driverfs_dev = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
+ md->disk->flags = GENHD_FL_EXT_DEVT;
if (area_type & MMC_BLK_DATA_AREA_RPMB)
md->disk->flags |= GENHD_FL_NO_PART_SCAN;
@@ -2369,6 +2381,9 @@ static int mmc_blk_probe(struct mmc_card *card)
mmc_set_drvdata(card, md);
mmc_fixup_device(card, blk_fixups);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 1);
+#endif
if (mmc_add_disk(md))
goto out;
@@ -2394,6 +2409,9 @@ static void mmc_blk_remove(struct mmc_card *card)
mmc_release_host(card->host);
mmc_blk_remove_req(md);
mmc_set_drvdata(card, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 0);
+#endif
}
#ifdef CONFIG_PM
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 269d072ef55..ae10a372af0 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -26,3 +26,18 @@ config MMC_CLKGATE
support handling this in order for it to be of any use.
If unsure, say N.
+
+config MMC_EMBEDDED_SDIO
+ boolean "MMC embedded SDIO device support (EXPERIMENTAL)"
+ help
+ If you say Y here, support will be added for embedded SDIO
+ devices which do not contain the necessary enumeration
+ support in hardware to be properly detected.
+
+config MMC_PARANOID_SD_INIT
+ bool "Enable paranoid SD card initialization (EXPERIMENTAL)"
+ help
+ If you say Y here, the MMC layer will be extra paranoid
+ about re-trying SD init requests. This can be a useful
+ work-around for buggy controllers and hardware. Enable
+ if you are experiencing issues with SD detection.
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index c40396f2320..6a83f4ccc10 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -27,6 +27,9 @@
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+#include <trace/events/mmc.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -172,6 +175,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
pr_debug("%s: %d bytes transferred: %d\n",
mmc_hostname(host),
mrq->data->bytes_xfered, mrq->data->error);
+ trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
}
if (mrq->stop) {
@@ -536,8 +540,12 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
mmc_start_bkops(host->card, true);
}
- if (!err && areq)
+ if (!err && areq) {
+ trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
+ areq->mrq->cmd->arg,
+ areq->mrq->data);
start_err = __mmc_start_data_req(host, areq->mrq);
+ }
if (host->areq)
mmc_post_req(host, host->areq->mrq, 0);
@@ -1591,6 +1599,36 @@ static inline void mmc_bus_put(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
}
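+
+/*
+ * Complete a resume that was deferred by MMC_BLOCK_DEFERRED_RESUME: power
+ * the bus back up and run the bus handler's resume and detect callbacks on
+ * first I/O instead of during system resume.
+ */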
+int mmc_resume_bus(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ if (!mmc_bus_needs_resume(host))
+ return -EINVAL;
+
+ printk("%s: Starting deferred resume\n", mmc_hostname(host));
+ spin_lock_irqsave(&host->lock, flags);
+ host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+ host->rescan_disable = 0;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mmc_bus_get(host);
+ if (host->bus_ops && !host->bus_dead) {
+ mmc_power_up(host);
+ BUG_ON(!host->bus_ops->resume);
+ host->bus_ops->resume(host);
+ }
+
+ if (host->bus_ops && host->bus_ops->detect && !host->bus_dead)
+ host->bus_ops->detect(host);
+
+ mmc_bus_put(host);
+ printk("%s: Deferred resume completed\n", mmc_hostname(host));
+ return 0;
+}
+EXPORT_SYMBOL(mmc_resume_bus);
+
/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
@@ -1656,6 +1694,8 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
spin_unlock_irqrestore(&host->lock, flags);
#endif
host->detect_change = 1;
+
+ wake_lock(&host->detect_wake_lock);
mmc_schedule_delayed_work(&host->detect, delay);
}
@@ -1815,8 +1855,13 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
struct mmc_command cmd = {0};
unsigned int qty = 0;
unsigned long timeout;
+ unsigned int fr, nr;
int err;
+ fr = from;
+ nr = to - from + 1;
+ trace_mmc_blk_erase_start(arg, fr, nr);
+
/*
* qty is used to calculate the erase timeout which depends on how many
* erase groups (or allocation units in SD terminology) are affected.
@@ -1920,6 +1965,8 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
(R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
+
+ trace_mmc_blk_erase_end(arg, fr, nr);
return err;
}
@@ -2351,6 +2398,7 @@ void mmc_rescan(struct work_struct *work)
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
int i;
+ bool extend_wakelock = false;
if (host->rescan_disable)
return;
@@ -2372,6 +2420,12 @@ void mmc_rescan(struct work_struct *work)
host->detect_change = 0;
+ /* If the card was removed the bus will be marked
+ * as dead - extend the wakelock so userspace
+ * can respond */
+ if (host->bus_dead)
+ extend_wakelock = true;
+
/*
* Let mmc_bus_put() free the bus/bus_ops if we've found that
* the card is no longer present.
@@ -2400,16 +2454,24 @@ void mmc_rescan(struct work_struct *work)
mmc_claim_host(host);
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
- if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+ if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) {
+ extend_wakelock = true;
break;
+ }
if (freqs[i] <= host->f_min)
break;
}
mmc_release_host(host);
out:
- if (host->caps & MMC_CAP_NEEDS_POLL)
+ if (extend_wakelock)
+ wake_lock_timeout(&host->detect_wake_lock, HZ / 2);
+ else
+ wake_unlock(&host->detect_wake_lock);
+ if (host->caps & MMC_CAP_NEEDS_POLL) {
+ wake_lock(&host->detect_wake_lock);
mmc_schedule_delayed_work(&host->detect, HZ);
+ }
}
void mmc_start_host(struct mmc_host *host)
@@ -2433,7 +2495,8 @@ void mmc_stop_host(struct mmc_host *host)
#endif
host->rescan_disable = 1;
- cancel_delayed_work_sync(&host->detect);
+ if (cancel_delayed_work_sync(&host->detect))
+ wake_unlock(&host->detect_wake_lock);
mmc_flush_scheduled_work();
/* clear pm flags now and let card drivers set them as needed */
@@ -2628,7 +2691,11 @@ int mmc_suspend_host(struct mmc_host *host)
{
int err = 0;
- cancel_delayed_work(&host->detect);
+ if (mmc_bus_needs_resume(host))
+ return 0;
+
+ if (cancel_delayed_work(&host->detect))
+ wake_unlock(&host->detect_wake_lock);
mmc_flush_scheduled_work();
mmc_bus_get(host);
@@ -2679,6 +2746,12 @@ int mmc_resume_host(struct mmc_host *host)
int err = 0;
mmc_bus_get(host);
+ if (mmc_bus_manual_resume(host)) {
+ host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
+ mmc_bus_put(host);
+ return 0;
+ }
+
if (host->bus_ops && !host->bus_dead) {
if (!mmc_card_keep_power(host)) {
mmc_power_up(host);
@@ -2739,9 +2812,14 @@ int mmc_pm_notify(struct notifier_block *notify_block,
}
spin_lock_irqsave(&host->lock, flags);
+ if (mmc_bus_needs_resume(host)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ break;
+ }
host->rescan_disable = 1;
spin_unlock_irqrestore(&host->lock, flags);
- cancel_delayed_work_sync(&host->detect);
+ if (cancel_delayed_work_sync(&host->detect))
+ wake_unlock(&host->detect_wake_lock);
if (!host->bus_ops || host->bus_ops->suspend)
break;
@@ -2762,6 +2840,10 @@ int mmc_pm_notify(struct notifier_block *notify_block,
case PM_POST_RESTORE:
spin_lock_irqsave(&host->lock, flags);
+ if (mmc_bus_manual_resume(host)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ break;
+ }
host->rescan_disable = 0;
spin_unlock_irqrestore(&host->lock, flags);
mmc_detect_change(host, 0);
@@ -2789,6 +2871,22 @@ void mmc_init_context_info(struct mmc_host *host)
init_waitqueue_head(&host->context_info.wait);
}
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+void mmc_set_embedded_sdio_data(struct mmc_host *host,
+ struct sdio_cis *cis,
+ struct sdio_cccr *cccr,
+ struct sdio_embedded_func *funcs,
+ int num_funcs)
+{
+ host->embedded_sdio_data.cis = cis;
+ host->embedded_sdio_data.cccr = cccr;
+ host->embedded_sdio_data.funcs = funcs;
+ host->embedded_sdio_data.num_funcs = num_funcs;
+}
+EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
+#endif
+
static int __init mmc_init(void)
{
int ret;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 2a3593d9f87..56dadd667ec 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -459,6 +459,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
+ wake_lock_init(&host->detect_wake_lock, WAKE_LOCK_SUSPEND,
+ kasprintf(GFP_KERNEL, "%s_detect", mmc_hostname(host)));
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
#ifdef CONFIG_PM
host->pm_notify.notifier_call = mmc_pm_notify;
@@ -511,7 +513,8 @@ int mmc_add_host(struct mmc_host *host)
mmc_host_clk_sysfs_init(host);
mmc_start_host(host);
- register_pm_notifier(&host->pm_notify);
+ if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+ register_pm_notifier(&host->pm_notify);
return 0;
}
@@ -528,7 +531,9 @@ EXPORT_SYMBOL(mmc_add_host);
*/
void mmc_remove_host(struct mmc_host *host)
{
- unregister_pm_notifier(&host->pm_notify);
+ if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+ unregister_pm_notifier(&host->pm_notify);
+
mmc_stop_host(host);
#ifdef CONFIG_DEBUG_FS
@@ -555,6 +560,7 @@ void mmc_free_host(struct mmc_host *host)
spin_lock(&mmc_host_lock);
idr_remove(&mmc_host_idr, host->index);
spin_unlock(&mmc_host_lock);
+ wake_lock_destroy(&host->detect_wake_lock);
put_device(&host->class_dev);
}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 9e645e19cec..f008318c5c4 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -805,6 +805,9 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
bool reinit)
{
int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries;
+#endif
if (!reinit) {
/*
@@ -831,7 +834,26 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
/*
* Fetch switch information from card.
*/
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ for (retries = 1; retries <= 3; retries++) {
+ err = mmc_read_switch(card);
+ if (!err) {
+ if (retries > 1) {
+ printk(KERN_WARNING
+ "%s: recovered\n",
+ mmc_hostname(host));
+ }
+ break;
+ } else {
+ printk(KERN_WARNING
+ "%s: read switch failed (attempt %d)\n",
+ mmc_hostname(host), retries);
+ }
+ }
+#else
err = mmc_read_switch(card);
+#endif
+
if (err)
return err;
}
@@ -1032,7 +1054,10 @@ static int mmc_sd_alive(struct mmc_host *host)
*/
static void mmc_sd_detect(struct mmc_host *host)
{
- int err;
+ int err = 0;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries = 5;
+#endif
BUG_ON(!host);
BUG_ON(!host->card);
@@ -1042,7 +1067,23 @@ static void mmc_sd_detect(struct mmc_host *host)
/*
* Just check if our card has been removed.
*/
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ while(retries) {
+ err = mmc_send_status(host->card, NULL);
+ if (err) {
+ retries--;
+ udelay(5);
+ continue;
+ }
+ break;
+ }
+ if (!retries) {
+ printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
+ __func__, mmc_hostname(host), err);
+ }
+#else
err = _mmc_detect_card_removed(host);
+#endif
mmc_release_host(host);
@@ -1084,12 +1125,31 @@ static int mmc_sd_suspend(struct mmc_host *host)
static int mmc_sd_resume(struct mmc_host *host)
{
int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries;
+#endif
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ retries = 5;
+ while (retries) {
+ err = mmc_sd_init_card(host, host->ocr, host->card);
+
+ if (err) {
+ printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
+ mmc_hostname(host), err, retries);
+ mdelay(5);
+ retries--;
+ continue;
+ }
+ break;
+ }
+#else
err = mmc_sd_init_card(host, host->ocr, host->card);
+#endif
mmc_release_host(host);
return err;
@@ -1143,6 +1203,9 @@ int mmc_attach_sd(struct mmc_host *host)
{
int err;
u32 ocr;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries;
+#endif
BUG_ON(!host);
WARN_ON(!host->claimed);
@@ -1198,9 +1261,27 @@ int mmc_attach_sd(struct mmc_host *host)
/*
* Detect and init the card.
*/
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ retries = 5;
+ while (retries) {
+ err = mmc_sd_init_card(host, host->ocr, NULL);
+ if (err) {
+ retries--;
+ continue;
+ }
+ break;
+ }
+
+ if (!retries) {
+ printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
+ mmc_hostname(host), err);
+ goto err;
+ }
+#else
err = mmc_sd_init_card(host, host->ocr, NULL);
if (err)
goto err;
+#endif
mmc_release_host(host);
err = mmc_add_card(host->card);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 6889a821c1d..46e68f125ff 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -10,6 +10,7 @@
*/
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/host.h>
@@ -28,6 +29,10 @@
#include "sdio_ops.h"
#include "sdio_cis.h"
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/sdio_ids.h>
+#endif
+
static int sdio_read_fbr(struct sdio_func *func)
{
int ret;
@@ -728,19 +733,35 @@ try_again:
goto finish;
}
- /*
- * Read the common registers.
- */
- err = sdio_read_cccr(card, ocr);
- if (err)
- goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.cccr)
+ memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
+ else {
+#endif
+ /*
+ * Read the common registers.
+ */
+ err = sdio_read_cccr(card, ocr);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
- /*
- * Read the common CIS tuples.
- */
- err = sdio_read_common_cis(card);
- if (err)
- goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.cis)
+ memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
+ else {
+#endif
+ /*
+ * Read the common CIS tuples.
+ */
+ err = sdio_read_common_cis(card);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
if (oldcard) {
int same = (card->cis.vendor == oldcard->cis.vendor &&
@@ -1147,14 +1168,36 @@ int mmc_attach_sdio(struct mmc_host *host)
funcs = (ocr & 0x70000000) >> 28;
card->sdio_funcs = 0;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.funcs)
+ card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
+#endif
+
/*
* Initialize (but don't add) all present functions.
*/
for (i = 0; i < funcs; i++, card->sdio_funcs++) {
- err = sdio_init_func(host->card, i + 1);
- if (err)
- goto remove;
-
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.funcs) {
+ struct sdio_func *tmp;
+
+ tmp = sdio_alloc_func(host->card);
+ if (IS_ERR(tmp))
+ goto remove;
+ tmp->num = (i + 1);
+ card->sdio_func[i] = tmp;
+ tmp->class = host->embedded_sdio_data.funcs[i].f_class;
+ tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
+ tmp->vendor = card->cis.vendor;
+ tmp->device = card->cis.device;
+ } else {
+#endif
+ err = sdio_init_func(host->card, i + 1);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
/*
* Enable Runtime PM for this func (if supported)
*/
@@ -1202,3 +1245,39 @@ err:
return err;
}
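+
+/*
+ * Re-run the SDIO init sequence on an already-attached card so a function
+ * driver (e.g. an embedded WLAN driver) can recover communications after
+ * the card has been reset without a full detach and re-probe.
+ */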
+int sdio_reset_comm(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 ocr;
+ int err;
+
+ printk("%s():\n", __func__);
+ mmc_claim_host(host);
+
+ mmc_go_idle(host);
+
+ mmc_set_clock(host, host->f_min);
+
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (err)
+ goto err;
+
+ host->ocr = mmc_select_voltage(host, ocr);
+ if (!host->ocr) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = mmc_sdio_init_card(host, host->ocr, card, 0);
+ if (err)
+ goto err;
+
+ mmc_release_host(host);
+ return 0;
+err:
+ printk("%s: Error resetting SDIO communications (%d)\n",
+ mmc_hostname(host), err);
+ mmc_release_host(host);
+ return err;
+}
+EXPORT_SYMBOL(sdio_reset_comm);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 546c67c2bbb..c012cf59b7d 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -25,6 +25,10 @@
#include "sdio_cis.h"
#include "sdio_bus.h"
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/host.h>
+#endif
+
/* show configuration fields */
#define sdio_config_attr(field, format_string) \
static ssize_t \
@@ -270,7 +274,14 @@ static void sdio_release_func(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- sdio_free_func_cis(func);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ /*
+ * If this device is embedded then we never allocated
+ * cis tables for this func
+ */
+ if (!func->card->host->embedded_sdio_data.funcs)
+#endif
+ sdio_free_func_cis(func);
kfree(func->info);
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 78cb4d5d9d5..8fdeb07723a 100644..100755
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -384,6 +384,39 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
EXPORT_SYMBOL_GPL(sdio_readb);
/**
+ * sdio_readb_ext - read a single byte from a SDIO function
+ * @func: SDIO function to access
+ * @addr: address to read
+ * @err_ret: optional status value from transfer
+ * @in: value to add to argument
+ *
+ * Reads a single byte from the address space of a given SDIO
+ * function. If there is a problem reading the address, 0xff
+ * is returned and @err_ret will contain the error code.
+ */
+unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
+ int *err_ret, unsigned in)
+{
+ int ret;
+ unsigned char val;
+
+ BUG_ON(!func);
+
+ if (err_ret)
+ *err_ret = 0;
+
+ ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val);
+ if (ret) {
+ if (err_ret)
+ *err_ret = ret;
+ return 0xFF;
+ }
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(sdio_readb_ext);
+
+/**
* sdio_writeb - write a single byte to a SDIO function
* @func: SDIO function to access
* @b: byte to write
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 50543f16621..f2ab08c2c5f 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,3 +1,10 @@
+config MTD_NAND_IDS
+ tristate "Include chip ids for known NAND devices."
+ depends on MTD
+ help
+ Useful for NAND drivers that do not use the NAND subsystem but
+ would still like to take advantage of the known chip information.
+
config MTD_NAND_ECC
tristate
@@ -133,9 +140,6 @@ config BCH_CONST_T
default 8 if MTD_NAND_OMAP_BCH8
endif
-config MTD_NAND_IDS
- tristate
-
config MTD_NAND_RICOH
tristate "Ricoh xD card reader"
default n
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index 1373c6d7278..282aec4860e 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -149,6 +149,23 @@ config PPPOL2TP
tunnels. L2TP is replacing PPTP for VPN uses.
if TTY
+config PPPOLAC
+ tristate "PPP on L2TP Access Concentrator"
+ depends on PPP && INET
+ help
+ L2TP (RFC 2661) is a tunneling protocol widely used in virtual private
+ networks. This driver handles L2TP data packets between a UDP socket
+ and a PPP channel, but only permits one session per socket. Thus it is
+ fairly simple and suited for clients.
+
+config PPPOPNS
+ tristate "PPP on PPTP Network Server"
+ depends on PPP && INET
+ help
+ PPTP (RFC 2637) is a tunneling protocol widely used in virtual private
+ networks. This driver handles PPTP data packets between a RAW socket
+ and a PPP channel. It is fairly simple and easy to use.
+
config PPP_ASYNC
tristate "PPP support for async serial ports"
depends on PPP
diff --git a/drivers/net/ppp/Makefile b/drivers/net/ppp/Makefile
index a6b6297b006..d283d03c468 100644
--- a/drivers/net/ppp/Makefile
+++ b/drivers/net/ppp/Makefile
@@ -11,3 +11,5 @@ obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
obj-$(CONFIG_PPPOL2TP) += pppox.o
obj-$(CONFIG_PPTP) += pppox.o pptp.o
+obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o
+obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o
diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c
new file mode 100644
index 00000000000..a5d3d634fd9
--- /dev/null
+++ b/drivers/net/ppp/pppolac.c
@@ -0,0 +1,449 @@
+/* drivers/net/pppolac.c
+ *
+ * Driver for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles L2TP data packets between a UDP socket and a PPP channel.
+ * The socket must stay connected, and only one session per socket is permitted.
+ * Sequencing of outgoing packets is controlled by the LNS. Incoming packets with
+ * sequences are reordered within a sliding window of one second. Currently
+ * reordering only happens when a packet is received. It is done for simplicity
+ * since no additional locks or threads are required. This driver only works on
+ * IPv4 due to the lack of UDP encapsulation support in IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/udp.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <net/tcp_states.h>
+#include <asm/uaccess.h>
+
+#define L2TP_CONTROL_BIT 0x80
+#define L2TP_LENGTH_BIT 0x40
+#define L2TP_SEQUENCE_BIT 0x08
+#define L2TP_OFFSET_BIT 0x02
+#define L2TP_VERSION 0x02
+#define L2TP_VERSION_MASK 0x0F
+
+#define PPP_ADDR 0xFF
+#define PPP_CTRL 0x03
+
+union unaligned {
+ __u32 u32;
+} __attribute__((packed));
+
+static inline union unaligned *unaligned(void *ptr)
+{
+ return (union unaligned *)ptr;
+}
+
+struct meta {
+ __u32 sequence;
+ __u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+ return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
+{
+ struct sock *sk = (struct sock *)sk_udp->sk_user_data;
+ struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac;
+ struct meta *meta = skb_meta(skb);
+ __u32 now = jiffies;
+ __u8 bits;
+ __u8 *ptr;
+
+ /* Drop the packet if L2TP header is missing. */
+ if (skb->len < sizeof(struct udphdr) + 6)
+ goto drop;
+
+ /* Put it back if it is a control packet. */
+ if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT)
+ return opt->backlog_rcv(sk_udp, skb);
+
+ /* Skip UDP header. */
+ skb_pull(skb, sizeof(struct udphdr));
+
+ /* Check the version. */
+ if ((skb->data[1] & L2TP_VERSION_MASK) != L2TP_VERSION)
+ goto drop;
+ bits = skb->data[0];
+ ptr = &skb->data[2];
+
+ /* Check the length if it is present. */
+ if (bits & L2TP_LENGTH_BIT) {
+ if ((ptr[0] << 8 | ptr[1]) != skb->len)
+ goto drop;
+ ptr += 2;
+ }
+
+ /* Skip all fields including optional ones. */
+ if (!skb_pull(skb, 6 + (bits & L2TP_SEQUENCE_BIT ? 4 : 0) +
+ (bits & L2TP_LENGTH_BIT ? 2 : 0) +
+ (bits & L2TP_OFFSET_BIT ? 2 : 0)))
+ goto drop;
+
+ /* Skip the offset padding if it is present. */
+ if (bits & L2TP_OFFSET_BIT &&
+ !skb_pull(skb, skb->data[-2] << 8 | skb->data[-1]))
+ goto drop;
+
+ /* Check the tunnel and the session. */
+ if (unaligned(ptr)->u32 != opt->local)
+ goto drop;
+
+ /* Check the sequence if it is present. */
+ if (bits & L2TP_SEQUENCE_BIT) {
+ meta->sequence = ptr[4] << 8 | ptr[5];
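+ /* 16-bit serial-number compare: packets behind the receive
+ * window are late duplicates and are dropped. */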
+ if ((__s16)(meta->sequence - opt->recv_sequence) < 0)
+ goto drop;
+ }
+
+ /* Skip PPP address and control if they are present. */
+ if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+ skb->data[1] == PPP_CTRL)
+ skb_pull(skb, 2);
+
+ /* Fix PPP protocol if it is compressed. */
+ if (skb->len >= 1 && skb->data[0] & 1)
+ skb_push(skb, 1)[0] = 0;
+
+ /* Drop the packet if PPP protocol is missing. */
+ if (skb->len < 2)
+ goto drop;
+
+ /* Perform reordering if sequencing is enabled. */
+ atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT);
+ if (bits & L2TP_SEQUENCE_BIT) {
+ struct sk_buff *skb1;
+
+ /* Insert the packet into receive queue in order. */
+ skb_set_owner_r(skb, sk);
+ skb_queue_walk(&sk->sk_receive_queue, skb1) {
+ struct meta *meta1 = skb_meta(skb1);
+ __s16 order = meta->sequence - meta1->sequence;
+ if (order == 0)
+ goto drop;
+ if (order < 0) {
+ meta->timestamp = meta1->timestamp;
+ skb_insert(skb1, skb, &sk->sk_receive_queue);
+ skb = NULL;
+ break;
+ }
+ }
+ if (skb) {
+ meta->timestamp = now;
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ }
+
+ /* Remove packets from receive queue as long as
+ * 1. the receive buffer is full,
+ * 2. they are queued longer than one second, or
+ * 3. there are no missing packets before them. */
+ skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+ meta = skb_meta(skb);
+ if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+ now - meta->timestamp < HZ &&
+ meta->sequence != opt->recv_sequence)
+ break;
+ skb_unlink(skb, &sk->sk_receive_queue);
+ opt->recv_sequence = (__u16)(meta->sequence + 1);
+ skb_orphan(skb);
+ ppp_input(&pppox_sk(sk)->chan, skb);
+ }
+ return NET_RX_SUCCESS;
+ }
+
+ /* Flush receive queue if sequencing is disabled. */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_orphan(skb);
+ ppp_input(&pppox_sk(sk)->chan, skb);
+ return NET_RX_SUCCESS;
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb)
+{
+ sock_hold(sk_udp);
+ sk_receive_skb(sk_udp, skb, 0);
+ return 0;
+}
+
+static struct sk_buff_head delivery_queue;
+
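+/*
+ * Transmission happens in process context through a shared workqueue:
+ * pppolac_xmit() only queues the finished frame, and this worker pushes
+ * it through the UDP socket's sendmsg, where sleeping is allowed.
+ */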
+static void pppolac_xmit_core(struct work_struct *delivery_work)
+{
+ mm_segment_t old_fs = get_fs();
+ struct sk_buff *skb;
+
+ set_fs(KERNEL_DS);
+ while ((skb = skb_dequeue(&delivery_queue))) {
+ struct sock *sk_udp = skb->sk;
+ struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+ struct msghdr msg = {
+ .msg_iov = (struct iovec *)&iov,
+ .msg_iovlen = 1,
+ .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+ };
+ sk_udp->sk_prot->sendmsg(NULL, sk_udp, &msg, skb->len);
+ kfree_skb(skb);
+ }
+ set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppolac_xmit_core);
+
+static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct sock *sk_udp = (struct sock *)chan->private;
+ struct pppolac_opt *opt = &pppox_sk(sk_udp->sk_user_data)->proto.lac;
+
+ /* Install PPP address and control. */
+ skb_push(skb, 2);
+ skb->data[0] = PPP_ADDR;
+ skb->data[1] = PPP_CTRL;
+
+ /* Install L2TP header. */
+ if (atomic_read(&opt->sequencing)) {
+ skb_push(skb, 10);
+ skb->data[0] = L2TP_SEQUENCE_BIT;
+ skb->data[6] = opt->xmit_sequence >> 8;
+ skb->data[7] = opt->xmit_sequence;
+ skb->data[8] = 0;
+ skb->data[9] = 0;
+ opt->xmit_sequence++;
+ } else {
+ skb_push(skb, 6);
+ skb->data[0] = 0;
+ }
+ skb->data[1] = L2TP_VERSION;
+ unaligned(&skb->data[2])->u32 = opt->remote;
+
+ /* Now send the packet via the delivery queue. */
+ skb_set_owner_w(skb, sk_udp);
+ skb_queue_tail(&delivery_queue, skb);
+ schedule_work(&delivery_work);
+ return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppolac_channel_ops = {
+ .start_xmit = pppolac_xmit,
+};
+
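+/*
+ * Bind an established UDP socket (passed in by fd) to this PPPoX socket:
+ * hook its encapsulation callbacks so L2TP data packets feed the PPP
+ * channel directly while control packets still reach userspace through
+ * the original backlog receive path.
+ */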
+static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr,
+ int addrlen, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct sockaddr_pppolac *addr = (struct sockaddr_pppolac *)useraddr;
+ struct socket *sock_udp = NULL;
+ struct sock *sk_udp;
+ int error;
+
+ if (addrlen != sizeof(struct sockaddr_pppolac) ||
+ !addr->local.tunnel || !addr->local.session ||
+ !addr->remote.tunnel || !addr->remote.session) {
+ return -EINVAL;
+ }
+
+ lock_sock(sk);
+ error = -EALREADY;
+ if (sk->sk_state != PPPOX_NONE)
+ goto out;
+
+ sock_udp = sockfd_lookup(addr->udp_socket, &error);
+ if (!sock_udp)
+ goto out;
+ sk_udp = sock_udp->sk;
+ lock_sock(sk_udp);
+
+ /* Remove this check when IPv6 supports UDP encapsulation. */
+ error = -EAFNOSUPPORT;
+ if (sk_udp->sk_family != AF_INET)
+ goto out;
+ error = -EPROTONOSUPPORT;
+ if (sk_udp->sk_protocol != IPPROTO_UDP)
+ goto out;
+ error = -EDESTADDRREQ;
+ if (sk_udp->sk_state != TCP_ESTABLISHED)
+ goto out;
+ error = -EBUSY;
+ if (udp_sk(sk_udp)->encap_type || sk_udp->sk_user_data)
+ goto out;
+ if (!sk_udp->sk_bound_dev_if) {
+ struct dst_entry *dst = sk_dst_get(sk_udp);
+ error = -ENODEV;
+ if (!dst)
+ goto out;
+ sk_udp->sk_bound_dev_if = dst->dev->ifindex;
+ dst_release(dst);
+ }
+
+ po->chan.hdrlen = 12;
+ po->chan.private = sk_udp;
+ po->chan.ops = &pppolac_channel_ops;
+ po->chan.mtu = PPP_MRU - 80;
+ po->proto.lac.local = unaligned(&addr->local)->u32;
+ po->proto.lac.remote = unaligned(&addr->remote)->u32;
+ atomic_set(&po->proto.lac.sequencing, 1);
+ po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv;
+
+ error = ppp_register_channel(&po->chan);
+ if (error)
+ goto out;
+
+ sk->sk_state = PPPOX_CONNECTED;
+ udp_sk(sk_udp)->encap_type = UDP_ENCAP_L2TPINUDP;
+ udp_sk(sk_udp)->encap_rcv = pppolac_recv;
+ sk_udp->sk_backlog_rcv = pppolac_recv_core;
+ sk_udp->sk_user_data = sk;
+out:
+ if (sock_udp) {
+ release_sock(sk_udp);
+ if (error)
+ sockfd_put(sock_udp);
+ }
+ release_sock(sk);
+ return error;
+}
+
+static int pppolac_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_DEAD)) {
+ release_sock(sk);
+ return -EBADF;
+ }
+
+ if (sk->sk_state != PPPOX_NONE) {
+ struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private;
+ lock_sock(sk_udp);
+ skb_queue_purge(&sk->sk_receive_queue);
+ pppox_unbind_sock(sk);
+ udp_sk(sk_udp)->encap_type = 0;
+ udp_sk(sk_udp)->encap_rcv = NULL;
+ sk_udp->sk_backlog_rcv = pppox_sk(sk)->proto.lac.backlog_rcv;
+ sk_udp->sk_user_data = NULL;
+ release_sock(sk_udp);
+ sockfd_put(sk_udp->sk_socket);
+ }
+
+ sock_orphan(sk);
+ sock->sk = NULL;
+ release_sock(sk);
+ sock_put(sk);
+ return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppolac_proto = {
+ .name = "PPPOLAC",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppolac_proto_ops = {
+ .family = PF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pppolac_release,
+ .bind = sock_no_bind,
+ .connect = pppolac_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = sock_no_poll,
+ .ioctl = pppox_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = sock_no_sendmsg,
+ .recvmsg = sock_no_recvmsg,
+ .mmap = sock_no_mmap,
+};
+
+static int pppolac_create(struct net *net, struct socket *sock)
+{
+ struct sock *sk;
+
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppolac_proto);
+ if (!sk)
+ return -ENOMEM;
+
+ sock_init_data(sock, sk);
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pppolac_proto_ops;
+ sk->sk_protocol = PX_PROTO_OLAC;
+ sk->sk_state = PPPOX_NONE;
+ return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppolac_pppox_proto = {
+ .create = pppolac_create,
+ .owner = THIS_MODULE,
+};
+
+static int __init pppolac_init(void)
+{
+ int error;
+
+ error = proto_register(&pppolac_proto, 0);
+ if (error)
+ return error;
+
+ error = register_pppox_proto(PX_PROTO_OLAC, &pppolac_pppox_proto);
+ if (error)
+ proto_unregister(&pppolac_proto);
+ else
+ skb_queue_head_init(&delivery_queue);
+ return error;
+}
+
+static void __exit pppolac_exit(void)
+{
+ unregister_pppox_proto(PX_PROTO_OLAC);
+ proto_unregister(&pppolac_proto);
+}
+
+module_init(pppolac_init);
+module_exit(pppolac_exit);
+
+MODULE_DESCRIPTION("PPP on L2TP Access Concentrator (PPPoLAC)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ppp/pppopns.c b/drivers/net/ppp/pppopns.c
new file mode 100644
index 00000000000..6016d29c066
--- /dev/null
+++ b/drivers/net/ppp/pppopns.c
@@ -0,0 +1,428 @@
+/* drivers/net/ppp/pppopns.c
+ *
+ * Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles PPTP data packets between a RAW socket and a PPP channel.
+ * The socket is created in kernel space and connected to the same address as
+ * the control socket. Outgoing packets are always sent with sequence numbers
+ * but without acknowledgements. Incoming packets with sequence numbers are
+ * reordered within a sliding window of one second. Reordering currently
+ * happens only when a packet is received; this keeps the implementation
+ * simple, since no additional locks or threads are required. This driver
+ * should work on both IPv4 and IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/ppp_defs.h>
+#include <linux/if.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <asm/uaccess.h>
+
+#define GRE_HEADER_SIZE 8
+
+#define PPTP_GRE_BITS htons(0x2001)
+#define PPTP_GRE_BITS_MASK htons(0xEF7F)
+#define PPTP_GRE_SEQ_BIT htons(0x1000)
+#define PPTP_GRE_ACK_BIT htons(0x0080)
+#define PPTP_GRE_TYPE htons(0x880B)
+
+#define PPP_ADDR 0xFF
+#define PPP_CTRL 0x03
+
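+/* Overlay for the PPTP GRE header: the first GRE_HEADER_SIZE bytes (bits,
+ * type, length, call) are always present; the sequence word is valid only
+ * when PPTP_GRE_SEQ_BIT is set, and an acknowledgement word may follow it,
+ * which is why the skb_pull() below accounts for both optional fields. */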
+struct header {
+ __u16 bits;
+ __u16 type;
+ __u16 length;
+ __u16 call;
+ __u32 sequence;
+} __attribute__((packed));
+
+struct meta {
+ __u32 sequence;
+ __u32 timestamp;
+};
+
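+/* Per-packet metadata lives in the 48-byte skb->cb scratch area, which
+ * easily holds the 8-byte struct meta used to order and age queued
+ * packets. */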
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+ return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
+{
+ struct sock *sk = (struct sock *)sk_raw->sk_user_data;
+ struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns;
+ struct meta *meta = skb_meta(skb);
+ __u32 now = jiffies;
+ struct header *hdr;
+
+	/* Skip the transport header. */
+ skb_pull(skb, skb_transport_header(skb) - skb->data);
+
+ /* Drop the packet if GRE header is missing. */
+ if (skb->len < GRE_HEADER_SIZE)
+ goto drop;
+ hdr = (struct header *)skb->data;
+
+ /* Check the header. */
+ if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local ||
+ (hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS)
+ goto drop;
+
+ /* Skip all fields including optional ones. */
+ if (!skb_pull(skb, GRE_HEADER_SIZE +
+ (hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) +
+ (hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0)))
+ goto drop;
+
+ /* Check the length. */
+ if (skb->len != ntohs(hdr->length))
+ goto drop;
+
+ /* Check the sequence if it is present. */
+ if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+ meta->sequence = ntohl(hdr->sequence);
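+		/* The signed difference is wraparound-safe: with
+		 * recv_sequence at 0xfffffffe, a new sequence of 1 gives
+		 * (__s32)(1 - 0xfffffffe) == 3 > 0, so it is accepted. */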
+ if ((__s32)(meta->sequence - opt->recv_sequence) < 0)
+ goto drop;
+ }
+
+ /* Skip PPP address and control if they are present. */
+ if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+ skb->data[1] == PPP_CTRL)
+ skb_pull(skb, 2);
+
+ /* Fix PPP protocol if it is compressed. */
+ if (skb->len >= 1 && skb->data[0] & 1)
+ skb_push(skb, 1)[0] = 0;
+
+ /* Drop the packet if PPP protocol is missing. */
+ if (skb->len < 2)
+ goto drop;
+
+ /* Perform reordering if sequencing is enabled. */
+ if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+ struct sk_buff *skb1;
+
+ /* Insert the packet into receive queue in order. */
+ skb_set_owner_r(skb, sk);
+ skb_queue_walk(&sk->sk_receive_queue, skb1) {
+ struct meta *meta1 = skb_meta(skb1);
+ __s32 order = meta->sequence - meta1->sequence;
+ if (order == 0)
+ goto drop;
+ if (order < 0) {
+ meta->timestamp = meta1->timestamp;
+ skb_insert(skb1, skb, &sk->sk_receive_queue);
+ skb = NULL;
+ break;
+ }
+ }
+ if (skb) {
+ meta->timestamp = now;
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ }
+
+		/* Remove packets from the receive queue as long as
+		 * 1. the receive buffer is full,
+		 * 2. they have been queued for longer than one second, or
+		 * 3. there are no missing packets before them. */
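+		/* The walk stops at the first packet for which none of the
+		 * three conditions above hold; everything before it is
+		 * delivered to the PPP channel in order. */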
+ skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+ meta = skb_meta(skb);
+ if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+ now - meta->timestamp < HZ &&
+ meta->sequence != opt->recv_sequence)
+ break;
+ skb_unlink(skb, &sk->sk_receive_queue);
+ opt->recv_sequence = meta->sequence + 1;
+ skb_orphan(skb);
+ ppp_input(&pppox_sk(sk)->chan, skb);
+ }
+ return NET_RX_SUCCESS;
+ }
+
+ /* Flush receive queue if sequencing is disabled. */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_orphan(skb);
+ ppp_input(&pppox_sk(sk)->chan, skb);
+ return NET_RX_SUCCESS;
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static void pppopns_recv(struct sock *sk_raw, int length)
+{
+ struct sk_buff *skb;
+ while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) {
+ sock_hold(sk_raw);
+ sk_receive_skb(sk_raw, skb, 0);
+ }
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppopns_xmit_core(struct work_struct *delivery_work)
+{
+ mm_segment_t old_fs = get_fs();
+ struct sk_buff *skb;
+
+ set_fs(KERNEL_DS);
+ while ((skb = skb_dequeue(&delivery_queue))) {
+ struct sock *sk_raw = skb->sk;
+ struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+ struct msghdr msg = {
+ .msg_iov = (struct iovec *)&iov,
+ .msg_iovlen = 1,
+ .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+ };
+ sk_raw->sk_prot->sendmsg(NULL, sk_raw, &msg, skb->len);
+ kfree_skb(skb);
+ }
+ set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppopns_xmit_core);
+
+static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct sock *sk_raw = (struct sock *)chan->private;
+ struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns;
+ struct header *hdr;
+ __u16 length;
+
+ /* Install PPP address and control. */
+ skb_push(skb, 2);
+ skb->data[0] = PPP_ADDR;
+ skb->data[1] = PPP_CTRL;
+ length = skb->len;
+
+ /* Install PPTP GRE header. */
+ hdr = (struct header *)skb_push(skb, 12);
+ hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT;
+ hdr->type = PPTP_GRE_TYPE;
+ hdr->length = htons(length);
+ hdr->call = opt->remote;
+ hdr->sequence = htonl(opt->xmit_sequence);
+ opt->xmit_sequence++;
+
+ /* Now send the packet via the delivery queue. */
+ skb_set_owner_w(skb, sk_raw);
+ skb_queue_tail(&delivery_queue, skb);
+ schedule_work(&delivery_work);
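+	/* Returning 1 tells the generic PPP layer the skb was consumed;
+	 * transmission completes asynchronously from the work item. */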
+ return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppopns_channel_ops = {
+ .start_xmit = pppopns_xmit,
+};
+
+static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr,
+ int addrlen, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr;
+ struct sockaddr_storage ss;
+ struct socket *sock_tcp = NULL;
+ struct socket *sock_raw = NULL;
+ struct sock *sk_tcp;
+ struct sock *sk_raw;
+ int error;
+
+ if (addrlen != sizeof(struct sockaddr_pppopns))
+ return -EINVAL;
+
+ lock_sock(sk);
+ error = -EALREADY;
+ if (sk->sk_state != PPPOX_NONE)
+ goto out;
+
+ sock_tcp = sockfd_lookup(addr->tcp_socket, &error);
+ if (!sock_tcp)
+ goto out;
+ sk_tcp = sock_tcp->sk;
+ error = -EPROTONOSUPPORT;
+ if (sk_tcp->sk_protocol != IPPROTO_TCP)
+ goto out;
+ addrlen = sizeof(struct sockaddr_storage);
+ error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen);
+ if (error)
+ goto out;
+ if (!sk_tcp->sk_bound_dev_if) {
+ struct dst_entry *dst = sk_dst_get(sk_tcp);
+ error = -ENODEV;
+ if (!dst)
+ goto out;
+ sk_tcp->sk_bound_dev_if = dst->dev->ifindex;
+ dst_release(dst);
+ }
+
+ error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw);
+ if (error)
+ goto out;
+ sk_raw = sock_raw->sk;
+ sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if;
+ error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0);
+ if (error)
+ goto out;
+
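+	/* hdrlen reserves 12 bytes for the GRE header with sequence number
+	 * plus 2 for the PPP address/control octets pushed in pppopns_xmit;
+	 * the MTU leaves room below PPP_MRU for the outer headers. */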
+ po->chan.hdrlen = 14;
+ po->chan.private = sk_raw;
+ po->chan.ops = &pppopns_channel_ops;
+ po->chan.mtu = PPP_MRU - 80;
+ po->proto.pns.local = addr->local;
+ po->proto.pns.remote = addr->remote;
+ po->proto.pns.data_ready = sk_raw->sk_data_ready;
+ po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv;
+
+ error = ppp_register_channel(&po->chan);
+ if (error)
+ goto out;
+
+ sk->sk_state = PPPOX_CONNECTED;
+ lock_sock(sk_raw);
+ sk_raw->sk_data_ready = pppopns_recv;
+ sk_raw->sk_backlog_rcv = pppopns_recv_core;
+ sk_raw->sk_user_data = sk;
+ release_sock(sk_raw);
+out:
+ if (sock_tcp)
+ sockfd_put(sock_tcp);
+ if (error && sock_raw)
+ sock_release(sock_raw);
+ release_sock(sk);
+ return error;
+}
+
+static int pppopns_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_DEAD)) {
+ release_sock(sk);
+ return -EBADF;
+ }
+
+ if (sk->sk_state != PPPOX_NONE) {
+ struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private;
+ lock_sock(sk_raw);
+ skb_queue_purge(&sk->sk_receive_queue);
+ pppox_unbind_sock(sk);
+ sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready;
+ sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv;
+ sk_raw->sk_user_data = NULL;
+ release_sock(sk_raw);
+ sock_release(sk_raw->sk_socket);
+ }
+
+ sock_orphan(sk);
+ sock->sk = NULL;
+ release_sock(sk);
+ sock_put(sk);
+ return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppopns_proto = {
+ .name = "PPPOPNS",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppopns_proto_ops = {
+ .family = PF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pppopns_release,
+ .bind = sock_no_bind,
+ .connect = pppopns_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = sock_no_poll,
+ .ioctl = pppox_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = sock_no_sendmsg,
+ .recvmsg = sock_no_recvmsg,
+ .mmap = sock_no_mmap,
+};
+
+static int pppopns_create(struct net *net, struct socket *sock)
+{
+ struct sock *sk;
+
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto);
+ if (!sk)
+ return -ENOMEM;
+
+ sock_init_data(sock, sk);
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pppopns_proto_ops;
+ sk->sk_protocol = PX_PROTO_OPNS;
+ sk->sk_state = PPPOX_NONE;
+ return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppopns_pppox_proto = {
+ .create = pppopns_create,
+ .owner = THIS_MODULE,
+};
+
+static int __init pppopns_init(void)
+{
+ int error;
+
+ error = proto_register(&pppopns_proto, 0);
+ if (error)
+ return error;
+
+ error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto);
+ if (error)
+ proto_unregister(&pppopns_proto);
+ else
+ skb_queue_head_init(&delivery_queue);
+ return error;
+}
+
+static void __exit pppopns_exit(void)
+{
+ unregister_pppox_proto(PX_PROTO_OPNS);
+ proto_unregister(&pppopns_proto);
+}
+
+module_init(pppopns_init);
+module_exit(pppopns_exit);
+
+MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 582497103fe..aeff706f163 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1898,6 +1898,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
int vnet_hdr_sz;
int ret;
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+ if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) {
+ return -EPERM;
+ }
+#endif
+
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index f8f0156dff4..5b0a49cf1de 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -264,6 +264,11 @@ config MWL8K
To compile this driver as a module, choose M here: the module
will be called mwl8k. If unsure, say N.
+config WIFI_CONTROL_FUNC
+ bool "Enable WiFi control function abstraction"
+ help
+	  Enables power, reset, and card-detect function abstraction.
+
source "drivers/net/wireless/ath/Kconfig"
source "drivers/net/wireless/b43/Kconfig"
source "drivers/net/wireless/b43legacy/Kconfig"
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 1c517c34e4b..082d3c2714e 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -67,23 +67,40 @@ static int __power_supply_changed_work(struct device *dev, void *data)
static void power_supply_changed_work(struct work_struct *work)
{
+ unsigned long flags;
struct power_supply *psy = container_of(work, struct power_supply,
changed_work);
dev_dbg(psy->dev, "%s\n", __func__);
- class_for_each_device(power_supply_class, NULL, psy,
- __power_supply_changed_work);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ if (psy->changed) {
+ psy->changed = false;
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
- power_supply_update_leds(psy);
+ class_for_each_device(power_supply_class, NULL, psy,
+ __power_supply_changed_work);
- kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+ power_supply_update_leds(psy);
+
+ kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ }
+ if (!psy->changed)
+ pm_relax(psy->dev);
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
}
void power_supply_changed(struct power_supply *psy)
{
+ unsigned long flags;
+
dev_dbg(psy->dev, "%s\n", __func__);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ psy->changed = true;
+ pm_stay_awake(psy->dev);
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
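+	/* Hold a wakeup reference until power_supply_changed_work() has
+	 * consumed the event; it calls pm_relax() once 'changed' is clear. */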
schedule_work(&psy->changed_work);
}
EXPORT_SYMBOL_GPL(power_supply_changed);
@@ -504,6 +521,11 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
if (rc)
goto device_add_failed;
+ spin_lock_init(&psy->changed_lock);
+ rc = device_init_wakeup(dev, true);
+ if (rc)
+ goto wakeup_init_failed;
+
rc = psy_register_thermal(psy);
if (rc)
goto register_thermal_failed;
@@ -525,6 +547,7 @@ create_triggers_failed:
register_cooler_failed:
psy_unregister_thermal(psy);
register_thermal_failed:
+wakeup_init_failed:
device_del(dev);
kobject_set_name_failed:
device_add_failed:
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 29178f78d73..5a5fef4447e 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -189,6 +189,10 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(time_to_full_avg),
POWER_SUPPLY_ATTR(type),
POWER_SUPPLY_ATTR(scope),
+ /* Local extensions */
+ POWER_SUPPLY_ATTR(usb_hc),
+ POWER_SUPPLY_ATTR(usb_otg),
+ POWER_SUPPLY_ATTR(charge_enabled),
/* Properties of type `const char *' */
POWER_SUPPLY_ATTR(model_name),
POWER_SUPPLY_ATTR(manufacturer),
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index c0c95be0f96..6da535db253 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -63,6 +63,15 @@ config ANDROID_LOW_MEMORY_KILLER
---help---
Registers processes to be killed when memory is low
+config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+ bool "Android Low Memory Killer: detect oom_adj values"
+ depends on ANDROID_LOW_MEMORY_KILLER
+ default y
+ ---help---
+ Detect oom_adj values written to
+ /sys/module/lowmemorykiller/parameters/adj and convert them
+ to oom_score_adj values.
+
config ANDROID_INTF_ALARM_DEV
bool "Android alarm driver"
depends on RTC_CLASS
@@ -99,6 +108,8 @@ config SW_SYNC_USER
*WARNING* improper use of this can result in deadlocking kernel
drivers from userspace.
+source "drivers/staging/android/ion/Kconfig"
+
endif # if ANDROID
endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index c136299e05a..0a01e191490 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -1,5 +1,7 @@
ccflags-y += -I$(src) # needed for trace events
+obj-y += ion/
+
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOGGER) += logger.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
deleted file mode 100644
index b15fb0d6b15..00000000000
--- a/drivers/staging/android/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-TODO:
- - checkpatch.pl cleanups
- - sparse fixes
- - rename files to be not so "generic"
- - make sure things build as modules properly
- - add proper arch dependencies as needed
- - audit userspace interfaces to make sure they are sane
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
-Brian Swetland <swetland@google.com>
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
index 4fd32f337f9..495b20cf3bf 100644
--- a/drivers/staging/android/android_alarm.h
+++ b/drivers/staging/android/android_alarm.h
@@ -16,50 +16,10 @@
#ifndef _LINUX_ANDROID_ALARM_H
#define _LINUX_ANDROID_ALARM_H
-#include <linux/ioctl.h>
-#include <linux/time.h>
#include <linux/compat.h>
+#include <linux/ioctl.h>
-enum android_alarm_type {
- /* return code bit numbers or set alarm arg */
- ANDROID_ALARM_RTC_WAKEUP,
- ANDROID_ALARM_RTC,
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
- ANDROID_ALARM_ELAPSED_REALTIME,
- ANDROID_ALARM_SYSTEMTIME,
-
- ANDROID_ALARM_TYPE_COUNT,
-
- /* return code bit numbers */
- /* ANDROID_ALARM_TIME_CHANGE = 16 */
-};
-
-enum android_alarm_return_flags {
- ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
- ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
- 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
- ANDROID_ALARM_ELAPSED_REALTIME_MASK =
- 1U << ANDROID_ALARM_ELAPSED_REALTIME,
- ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
- ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
-};
-
-/* Disable alarm */
-#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
-
-/* Ack last alarm and wait for next */
-#define ANDROID_ALARM_WAIT _IO('a', 1)
-
-#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
-/* Set alarm */
-#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
-#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
-#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
-#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
-#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
-#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
-
+#include "uapi/android_alarm.h"
#ifdef CONFIG_COMPAT
#define ANDROID_ALARM_SET_COMPAT(type) ALARM_IOW(2, type, \
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index e681bdd9aa5..3511b084036 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -224,21 +224,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
/* If size is not set, or set to 0, always return EOF. */
if (asma->size == 0)
- goto out;
+ goto out_unlock;
if (!asma->file) {
ret = -EBADF;
- goto out;
+ goto out_unlock;
}
- ret = asma->file->f_op->read(asma->file, buf, len, pos);
- if (ret < 0)
- goto out;
+ mutex_unlock(&ashmem_mutex);
- /** Update backing file pos, since f_ops->read() doesn't */
- asma->file->f_pos = *pos;
+ /*
+ * asma and asma->file are used outside the lock here. We assume
+ * once asma->file is set it will never be changed, and will not
+ * be destroyed until all references to the file are dropped and
+ * ashmem_release is called.
+ */
+ ret = asma->file->f_op->read(asma->file, buf, len, pos);
+ if (ret >= 0) {
+		/* Update the backing file pos, since f_op->read() doesn't. */
+ asma->file->f_pos = *pos;
+ }
+ return ret;
-out:
+out_unlock:
mutex_unlock(&ashmem_mutex);
return ret;
}
@@ -317,22 +325,14 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
}
get_file(asma->file);
- /*
- * XXX - Reworked to use shmem_zero_setup() instead of
- * shmem_set_file while we're in staging. -jstultz
- */
- if (vma->vm_flags & VM_SHARED) {
- ret = shmem_zero_setup(vma);
- if (ret) {
- fput(asma->file);
- goto out;
- }
+ if (vma->vm_flags & VM_SHARED)
+ shmem_set_file(vma, asma->file);
+ else {
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ vma->vm_file = asma->file;
}
- if (vma->vm_file)
- fput(vma->vm_file);
- vma->vm_file = asma->file;
-
out:
mutex_unlock(&ashmem_mutex);
return ret;
@@ -413,6 +413,7 @@ out:
static int set_name(struct ashmem_area *asma, void __user *name)
{
+ int len;
int ret = 0;
char local_name[ASHMEM_NAME_LEN];
@@ -425,21 +426,19 @@ static int set_name(struct ashmem_area *asma, void __user *name)
* variable that does not need protection and later copy the local
* variable to the structure member with lock held.
*/
- if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
- return -EFAULT;
-
+ len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+ if (len < 0)
+ return len;
+ if (len == ASHMEM_NAME_LEN)
+ local_name[ASHMEM_NAME_LEN - 1] = '\0';
mutex_lock(&ashmem_mutex);
/* cannot change an existing mapping's name */
- if (unlikely(asma->file)) {
+ if (unlikely(asma->file))
ret = -EINVAL;
- goto out;
- }
- memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
- local_name, ASHMEM_NAME_LEN);
- asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
-out:
- mutex_unlock(&ashmem_mutex);
+ else
+ strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
+ mutex_unlock(&ashmem_mutex);
return ret;
}
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
index 8dc0f0d3adf..5abcfd7aa70 100644
--- a/drivers/staging/android/ashmem.h
+++ b/drivers/staging/android/ashmem.h
@@ -16,35 +16,7 @@
#include <linux/ioctl.h>
#include <linux/compat.h>
-#define ASHMEM_NAME_LEN 256
-
-#define ASHMEM_NAME_DEF "dev/ashmem"
-
-/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
-#define ASHMEM_NOT_PURGED 0
-#define ASHMEM_WAS_PURGED 1
-
-/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
-#define ASHMEM_IS_UNPINNED 0
-#define ASHMEM_IS_PINNED 1
-
-struct ashmem_pin {
- __u32 offset; /* offset into region, in bytes, page-aligned */
- __u32 len; /* length forward from offset, in bytes, page-aligned */
-};
-
-#define __ASHMEMIOC 0x77
-
-#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
-#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
-#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
-#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
-#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
-#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
-#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
-#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
-#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
-#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
+#include "uapi/ashmem.h"
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 1567ac296b3..578e670ff53 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -20,6 +20,7 @@
#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
+#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
@@ -36,6 +37,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
+#include <linux/security.h>
#include "binder.h"
#include "binder_trace.h"
@@ -227,8 +229,8 @@ struct binder_node {
int internal_strong_refs;
int local_weak_refs;
int local_strong_refs;
- void __user *ptr;
- void __user *cookie;
+ userptr32_t ptr;
+ userptr32_t cookie;
unsigned has_strong_ref:1;
unsigned pending_strong_ref:1;
unsigned has_weak_ref:1;
@@ -241,7 +243,7 @@ struct binder_node {
struct binder_ref_death {
struct binder_work work;
- void __user *cookie;
+ userptr32_t cookie;
};
struct binder_ref {
@@ -316,7 +318,7 @@ struct binder_proc {
int requested_threads;
int requested_threads_started;
int ready_threads;
- long default_priority;
+ int default_priority;
struct dentry *debugfs_entry;
};
@@ -358,8 +360,8 @@ struct binder_transaction {
struct binder_buffer *buffer;
unsigned int code;
unsigned int flags;
- long priority;
- long saved_priority;
+ int priority;
+ int saved_priority;
kuid_t sender_euid;
};
@@ -428,16 +430,16 @@ static inline void binder_unlock(const char *tag)
mutex_unlock(&binder_main_lock);
}
-static void binder_set_nice(long nice)
+static void binder_set_nice(int nice)
{
- long min_nice;
+ int min_nice;
if (can_nice(current, nice)) {
set_user_nice(current, nice);
return;
}
min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
binder_debug(BINDER_DEBUG_PRIORITY_CAP,
- "%d: nice value %ld not allowed use %ld instead\n",
+ "%d: nice value %d not allowed use %d instead\n",
current->pid, nice, min_nice);
set_user_nice(current, min_nice);
if (min_nice < 20)
@@ -514,13 +516,13 @@ static void binder_insert_allocated_buffer(struct binder_proc *proc,
}
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
- void __user *user_ptr)
+ userptr32_t user_ptr)
{
struct rb_node *n = proc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
struct binder_buffer *kern_ptr;
- kern_ptr = user_ptr - proc->user_buffer_offset
+ kern_ptr = (void *)(unsigned long)user_ptr - proc->user_buffer_offset
- offsetof(struct binder_buffer, data);
while (n) {
@@ -658,8 +660,8 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
return NULL;
}
- size = ALIGN(data_size, sizeof(void *)) +
- ALIGN(offsets_size, sizeof(void *));
+ size = ALIGN(data_size, sizeof(userptr32_t)) +
+ ALIGN(offsets_size, sizeof(userptr32_t));
if (size < data_size || size < offsets_size) {
binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
@@ -807,8 +809,8 @@ static void binder_free_buf(struct binder_proc *proc,
buffer_size = binder_buffer_size(proc, buffer);
- size = ALIGN(buffer->data_size, sizeof(void *)) +
- ALIGN(buffer->offsets_size, sizeof(void *));
+ size = ALIGN(buffer->data_size, sizeof(userptr32_t)) +
+ ALIGN(buffer->offsets_size, sizeof(userptr32_t));
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_free_buf %p size %zd buffer_size %zd\n",
@@ -855,7 +857,7 @@ static void binder_free_buf(struct binder_proc *proc,
}
static struct binder_node *binder_get_node(struct binder_proc *proc,
- void __user *ptr)
+ userptr32_t ptr)
{
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
@@ -874,8 +876,8 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
- void __user *ptr,
- void __user *cookie)
+ userptr32_t ptr,
+ userptr32_t cookie)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
@@ -907,7 +909,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%p c%p created\n",
+ "%d:%d node %d u%x c%x created\n",
proc->pid, current->pid, node->debug_id,
node->ptr, node->cookie);
return node;
@@ -1225,9 +1227,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_buffer *buffer,
- size_t *failed_at)
+ uint32_t *failed_at)
{
- size_t *offp, *off_end;
+ uint32_t *offp, *off_end;
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -1238,17 +1240,18 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
- offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
+ offp = (uint32_t *)(buffer->data +
+ ALIGN(buffer->data_size, sizeof(userptr32_t)));
if (failed_at)
off_end = failed_at;
else
- off_end = (void *)offp + buffer->offsets_size;
+ off_end = (uint32_t *)offp + (buffer->offsets_size/4);
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
if (*offp > buffer->data_size - sizeof(*fp) ||
buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(void *))) {
- pr_err("transaction release %d bad offset %zd, size %zd\n",
+ !IS_ALIGNED(*offp, sizeof(userptr32_t))) {
+ pr_err("transaction release %d bad offset %x, size %zd\n",
debug_id, *offp, buffer->data_size);
continue;
}
@@ -1258,12 +1261,11 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_WEAK_BINDER: {
struct binder_node *node = binder_get_node(proc, fp->binder);
if (node == NULL) {
- pr_err("transaction release %d bad node %p\n",
- debug_id, fp->binder);
+ pr_err("transaction release %d bad node %x\n", debug_id, fp->binder);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
- " node %d u%p\n",
+ " node %d u%x\n",
node->debug_id, node->ptr);
binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
} break;
@@ -1271,7 +1273,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
- pr_err("transaction release %d bad handle %ld\n",
+ pr_err("transaction release %d bad handle %d\n",
debug_id, fp->handle);
break;
}
@@ -1283,13 +1285,13 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_FD:
binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %ld\n", fp->handle);
+ " fd %d\n", fp->handle);
if (failed_at)
task_close_fd(proc, fp->handle);
break;
default:
- pr_err("transaction release %d bad object type %lx\n",
+ pr_err("transaction release %d bad object type %x\n",
debug_id, fp->type);
break;
}
@@ -1302,7 +1304,7 @@ static void binder_transaction(struct binder_proc *proc,
{
struct binder_transaction *t;
struct binder_work *tcomplete;
- size_t *offp, *off_end;
+ uint32_t *offp, *off_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -1382,6 +1384,10 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
+ if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
+ return_error = BR_FAILED_REPLY;
+ goto err_invalid_target_handle;
+ }
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
@@ -1431,14 +1437,14 @@ static void binder_transaction(struct binder_proc *proc,
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_REPLY %d -> %d:%d, data %p-%p size %zd-%zd\n",
+ "%d:%d BC_REPLY %d -> %d:%d, data %x-%x size %d-%d\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
tr->data.ptr.buffer, tr->data.ptr.offsets,
tr->data_size, tr->offsets_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_TRANSACTION %d -> %d - node %d, data %p-%p size %zd-%zd\n",
+ "%d:%d BC_TRANSACTION %d -> %d - node %d, data %x-%x size %d-%d\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
tr->data.ptr.buffer, tr->data.ptr.offsets,
@@ -1471,33 +1477,34 @@ static void binder_transaction(struct binder_proc *proc,
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
- offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
+ offp = (uint32_t *)(t->buffer->data +
+ ALIGN(tr->data_size, sizeof(userptr32_t)));
- if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
+ if (copy_from_user(t->buffer->data, (void *)(unsigned long)(tr->data.ptr.buffer), tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
- if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
+ if (copy_from_user(offp, (void *)(unsigned long)(tr->data.ptr.offsets), tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
- if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
- binder_user_error("%d:%d got transaction with invalid offsets size, %zd\n",
+ if (!IS_ALIGNED(tr->offsets_size, sizeof(uint32_t))) {
+ binder_user_error("%d:%d got transaction with invalid offsets size, %d\n",
proc->pid, thread->pid, tr->offsets_size);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
- off_end = (void *)offp + tr->offsets_size;
+ off_end = (uint32_t *)offp + (tr->offsets_size/4);
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
if (*offp > t->buffer->data_size - sizeof(*fp) ||
t->buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(void *))) {
- binder_user_error("%d:%d got transaction with invalid offset, %zd\n",
+ !IS_ALIGNED(*offp, sizeof(userptr32_t))) {
+ binder_user_error("%d:%d got transaction with invalid offset, %x\n",
proc->pid, thread->pid, *offp);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
@@ -1518,12 +1525,16 @@ static void binder_transaction(struct binder_proc *proc,
node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
- binder_user_error("%d:%d sending u%p node %d, cookie mismatch %p != %p\n",
+ binder_user_error("%d:%d sending u%x node %d, cookie mismatch %x != %x\n",
proc->pid, thread->pid,
fp->binder, node->debug_id,
fp->cookie, node->cookie);
goto err_binder_get_ref_for_node_failed;
}
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_for_node_failed;
+ }
ref = binder_get_ref_for_node(target_proc, node);
if (ref == NULL) {
return_error = BR_FAILED_REPLY;
@@ -1539,7 +1550,7 @@ static void binder_transaction(struct binder_proc *proc,
trace_binder_transaction_node_to_ref(t, node, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
- " node %d u%p -> ref %d desc %d\n",
+ " node %d u%x -> ref %d desc %d\n",
node->debug_id, node->ptr, ref->debug_id,
ref->desc);
} break;
@@ -1547,12 +1558,16 @@ static void binder_transaction(struct binder_proc *proc,
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
- binder_user_error("%d:%d got transaction with invalid handle, %ld\n",
+ binder_user_error("%d:%d got transaction with invalid handle, %d\n",
proc->pid,
thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_failed;
}
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_failed;
+ }
if (ref->node->proc == target_proc) {
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER;
@@ -1563,7 +1578,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
trace_binder_transaction_ref_to_node(t, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d -> node %d u%p\n",
+ " ref %d desc %d -> node %d u%x\n",
ref->debug_id, ref->desc, ref->node->debug_id,
ref->node->ptr);
} else {
@@ -1590,13 +1605,13 @@ static void binder_transaction(struct binder_proc *proc,
if (reply) {
if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
- binder_user_error("%d:%d got reply with fd, %ld, but target does not allow fds\n",
+ binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
}
} else if (!target_node->accept_fds) {
- binder_user_error("%d:%d got transaction with fd, %ld, but target does not allow fds\n",
+ binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
@@ -1604,11 +1619,16 @@ static void binder_transaction(struct binder_proc *proc,
file = fget(fp->handle);
if (file == NULL) {
- binder_user_error("%d:%d got transaction with invalid fd, %ld\n",
+ binder_user_error("%d:%d got transaction with invalid fd, %d\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fget_failed;
}
+ if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) {
+ fput(file);
+ return_error = BR_FAILED_REPLY;
+ goto err_get_unused_fd_failed;
+ }
target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
if (target_fd < 0) {
fput(file);
@@ -1618,13 +1638,13 @@ static void binder_transaction(struct binder_proc *proc,
task_fd_install(target_proc, target_fd, file);
trace_binder_transaction_fd(t, fp->handle, target_fd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %ld -> %d\n", fp->handle, target_fd);
+ " fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
fp->handle = target_fd;
} break;
default:
- binder_user_error("%d:%d got transaction with invalid object type, %lx\n",
+ binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, fp->type);
return_error = BR_FAILED_REPLY;
goto err_bad_object_type;
@@ -1681,7 +1701,7 @@ err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction failed %d, size %zd-%zd\n",
+ "%d:%d transaction failed %d, size %d-%d\n",
proc->pid, thread->pid, return_error,
tr->data_size, tr->offsets_size);
@@ -1700,7 +1720,7 @@ err_no_context_mgr_node:
}
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
- void __user *buffer, int size, signed long *consumed)
+ void __user *buffer, int size, int *consumed)
{
uint32_t cmd;
void __user *ptr = buffer + *consumed;
@@ -1716,11 +1736,16 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
- switch (cmd) {
- case BC_INCREFS:
- case BC_ACQUIRE:
- case BC_RELEASE:
- case BC_DECREFS: {
+ /*
+	 * Since the transaction ioctl numbers are generated using
+	 * _IOC(dir, type, nr, size), a command with a different
+	 * userspace size will not match any case below.
+ */
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(BC_INCREFS):
+ case _IOC_NR(BC_ACQUIRE):
+ case _IOC_NR(BC_RELEASE):
+ case _IOC_NR(BC_DECREFS): {
uint32_t target;
struct binder_ref *ref;
const char *debug_string;
@@ -1744,20 +1769,20 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
proc->pid, thread->pid, target);
break;
}
- switch (cmd) {
- case BC_INCREFS:
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(BC_INCREFS):
debug_string = "IncRefs";
binder_inc_ref(ref, 0, NULL);
break;
- case BC_ACQUIRE:
+ case _IOC_NR(BC_ACQUIRE):
debug_string = "Acquire";
binder_inc_ref(ref, 1, NULL);
break;
- case BC_RELEASE:
+ case _IOC_NR(BC_RELEASE):
debug_string = "Release";
binder_dec_ref(ref, 1);
break;
- case BC_DECREFS:
+ case _IOC_NR(BC_DECREFS):
default:
debug_string = "DecRefs";
binder_dec_ref(ref, 0);
@@ -1769,21 +1794,25 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
ref->desc, ref->strong, ref->weak, ref->node->debug_id);
break;
}
- case BC_INCREFS_DONE:
- case BC_ACQUIRE_DONE: {
- void __user *node_ptr;
- void *cookie;
+ case _IOC_NR(BC_INCREFS_DONE):
+ case _IOC_NR(BC_ACQUIRE_DONE): {
+ userptr32_t node_ptr;
+ userptr32_t cookie;
struct binder_node *node;
- if (get_user(node_ptr, (void * __user *)ptr))
+ if (_IOC_SIZE(cmd) != sizeof(struct binder_ptr_cookie)) {
+			pr_err("binder: transaction structure size differs\n");
return -EFAULT;
- ptr += sizeof(void *);
- if (get_user(cookie, (void * __user *)ptr))
+ }
+ if (get_user(node_ptr, (userptr32_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(userptr32_t);
+ if (get_user(cookie, (userptr32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(userptr32_t);
node = binder_get_node(proc, node_ptr);
if (node == NULL) {
- binder_user_error("%d:%d %s u%p no match\n",
+ binder_user_error("%d:%d %s u%x no match\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ?
"BC_INCREFS_DONE" :
@@ -1792,7 +1821,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
break;
}
if (cookie != node->cookie) {
- binder_user_error("%d:%d %s u%p node %d cookie mismatch %p != %p\n",
+ binder_user_error("%d:%d %s u%x node %d cookie mismatch %x != %x\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ?
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
@@ -1825,34 +1854,34 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
node->debug_id, node->local_strong_refs, node->local_weak_refs);
break;
}
- case BC_ATTEMPT_ACQUIRE:
+ case _IOC_NR(BC_ATTEMPT_ACQUIRE):
pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
return -EINVAL;
- case BC_ACQUIRE_RESULT:
+ case _IOC_NR(BC_ACQUIRE_RESULT):
pr_err("BC_ACQUIRE_RESULT not supported\n");
return -EINVAL;
- case BC_FREE_BUFFER: {
- void __user *data_ptr;
+ case _IOC_NR(BC_FREE_BUFFER): {
+ userptr32_t data_ptr;
struct binder_buffer *buffer;
- if (get_user(data_ptr, (void * __user *)ptr))
+ if (get_user(data_ptr, (userptr32_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(userptr32_t);
buffer = binder_buffer_lookup(proc, data_ptr);
if (buffer == NULL) {
- binder_user_error("%d:%d BC_FREE_BUFFER u%p no match\n",
+ binder_user_error("%d:%d BC_FREE_BUFFER u%x no match\n",
proc->pid, thread->pid, data_ptr);
break;
}
if (!buffer->allow_user_free) {
- binder_user_error("%d:%d BC_FREE_BUFFER u%p matched unreturned buffer\n",
+ binder_user_error("%d:%d BC_FREE_BUFFER u%x matched unreturned buffer\n",
proc->pid, thread->pid, data_ptr);
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
- "%d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
+ "%d:%d BC_FREE_BUFFER u%x found buffer %d for %s transaction\n",
proc->pid, thread->pid, data_ptr, buffer->debug_id,
buffer->transaction ? "active" : "finished");
@@ -1873,10 +1902,14 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
break;
}
- case BC_TRANSACTION:
- case BC_REPLY: {
+ case _IOC_NR(BC_TRANSACTION):
+ case _IOC_NR(BC_REPLY): {
struct binder_transaction_data tr;
+ if (_IOC_SIZE(cmd) != sizeof(tr)) {
+			pr_err("binder: transaction structure size differs\n");
+ return -EFAULT;
+ }
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
@@ -1884,7 +1917,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
break;
}
- case BC_REGISTER_LOOPER:
+ case _IOC_NR(BC_REGISTER_LOOPER):
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
@@ -1902,7 +1935,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
break;
- case BC_ENTER_LOOPER:
+ case _IOC_NR(BC_ENTER_LOOPER):
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_ENTER_LOOPER\n",
proc->pid, thread->pid);
@@ -1913,26 +1946,26 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
}
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
break;
- case BC_EXIT_LOOPER:
+ case _IOC_NR(BC_EXIT_LOOPER):
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_EXIT_LOOPER\n",
proc->pid, thread->pid);
thread->looper |= BINDER_LOOPER_STATE_EXITED;
break;
- case BC_REQUEST_DEATH_NOTIFICATION:
- case BC_CLEAR_DEATH_NOTIFICATION: {
+ case _IOC_NR(BC_REQUEST_DEATH_NOTIFICATION):
+ case _IOC_NR(BC_CLEAR_DEATH_NOTIFICATION): {
uint32_t target;
- void __user *cookie;
+ userptr32_t cookie;
struct binder_ref *ref;
struct binder_ref_death *death;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (get_user(cookie, (void __user * __user *)ptr))
+ if (get_user(cookie, (userptr32_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(userptr32_t);
ref = binder_get_ref(proc, target);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
@@ -1945,7 +1978,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
}
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
- "%d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
+ "%d:%d %s %x ref %d desc %d s %d w %d for node %d\n",
proc->pid, thread->pid,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
@@ -1988,7 +2021,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
}
death = ref->death;
if (death->cookie != cookie) {
- binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %p != %p\n",
+ binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %x != %x\n",
proc->pid, thread->pid,
death->cookie, cookie);
break;
@@ -2008,14 +2041,13 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
}
}
} break;
- case BC_DEAD_BINDER_DONE: {
+ case _IOC_NR(BC_DEAD_BINDER_DONE): {
struct binder_work *w;
- void __user *cookie;
+ userptr32_t cookie;
struct binder_ref_death *death = NULL;
- if (get_user(cookie, (void __user * __user *)ptr))
+ if (get_user(cookie, (userptr32_t __user *)ptr))
return -EFAULT;
-
- ptr += sizeof(void *);
+ ptr += sizeof(userptr32_t);
list_for_each_entry(w, &proc->delivered_death, entry) {
struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
if (tmp_death->cookie == cookie) {
@@ -2024,10 +2056,10 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
- "%d:%d BC_DEAD_BINDER_DONE %p found %p\n",
+ "%d:%d BC_DEAD_BINDER_DONE %x found %p\n",
proc->pid, thread->pid, cookie, death);
if (death == NULL) {
- binder_user_error("%d:%d BC_DEAD_BINDER_DONE %p not found\n",
+ binder_user_error("%d:%d BC_DEAD_BINDER_DONE %x not found\n",
proc->pid, thread->pid, cookie);
break;
}
@@ -2081,7 +2113,7 @@ static int binder_has_thread_work(struct binder_thread *thread)
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
void __user *buffer, int size,
- signed long *consumed, int non_block)
+ int *consumed, int non_block)
{
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
@@ -2140,13 +2172,13 @@ retry:
if (!binder_has_proc_work(proc, thread))
ret = -EAGAIN;
} else
- ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+ ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
} else {
if (non_block) {
if (!binder_has_thread_work(thread))
ret = -EAGAIN;
} else
- ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
+ ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
}
binder_lock(__func__);
@@ -2227,22 +2259,22 @@ retry:
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (put_user(node->ptr, (void * __user *)ptr))
+ if (put_user((unsigned long)node->ptr, (userptr32_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
- if (put_user(node->cookie, (void * __user *)ptr))
+ ptr += sizeof(userptr32_t);
+ if (put_user((unsigned long)node->cookie, (userptr32_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(userptr32_t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s %d u%p c%p\n",
+ "%d:%d %s %d u%x c%x\n",
proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
} else {
list_del_init(&w->entry);
if (!weak && !strong) {
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%p c%p deleted\n",
+ "%d:%d node %d u%x c%x deleted\n",
proc->pid, thread->pid, node->debug_id,
node->ptr, node->cookie);
rb_erase(&node->rb_node, &proc->nodes);
@@ -2250,7 +2282,7 @@ retry:
binder_stats_deleted(BINDER_STAT_NODE);
} else {
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%p c%p state unchanged\n",
+ "%d:%d node %d u%x c%x state unchanged\n",
proc->pid, thread->pid, node->debug_id, node->ptr,
node->cookie);
}
@@ -2270,12 +2302,12 @@ retry:
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (put_user(death->cookie, (void * __user *)ptr))
+ if (put_user((unsigned long)death->cookie, (userptr32_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(userptr32_t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
- "%d:%d %s %p\n",
+ "%d:%d %s %x\n",
proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" :
@@ -2310,8 +2342,8 @@ retry:
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
- tr.target.ptr = NULL;
- tr.cookie = NULL;
+ tr.target.ptr = 0;
+ tr.cookie = 0;
cmd = BR_REPLY;
}
tr.code = t->code;
@@ -2326,13 +2358,14 @@ retry:
tr.sender_pid = 0;
}
- tr.data_size = t->buffer->data_size;
- tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (void *)t->buffer->data +
- proc->user_buffer_offset;
- tr.data.ptr.offsets = tr.data.ptr.buffer +
+ tr.data_size = (userptr32_t)t->buffer->data_size;
+ tr.offsets_size = (userptr32_t)t->buffer->offsets_size;
+ tr.data.ptr.buffer = (unsigned long)((void *)t->buffer->data +
+ proc->user_buffer_offset);
+
+ tr.data.ptr.offsets = (userptr32_t)(tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
- sizeof(void *));
+ sizeof(userptr32_t)));
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
@@ -2344,7 +2377,7 @@ retry:
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %p-%p\n",
+ "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %x-%x\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
@@ -2421,7 +2454,7 @@ static void binder_release_work(struct list_head *list)
death = container_of(w, struct binder_ref_death, work);
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
- "undelivered death notification, %p\n",
+ "undelivered death notification, %x\n",
death->cookie);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
@@ -2566,10 +2599,16 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto err;
}
- switch (cmd) {
- case BINDER_WRITE_READ: {
+ /*
+	 * Since the transaction ioctl numbers are generated using
+	 * _IOC(dir, type, nr, size), a command with a different
+	 * userspace size will not match any case below.
+ */
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(BINDER_WRITE_READ): {
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
+ pr_err("binder: BINDER_WRITE_READ transaction size differs\n");
ret = -EINVAL;
goto err;
}
@@ -2578,12 +2617,12 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto err;
}
binder_debug(BINDER_DEBUG_READ_WRITE,
- "%d:%d write %ld at %08lx, read %ld at %08lx\n",
+ "%d:%d write %d at %08x, read %d at %08x\n",
proc->pid, thread->pid, bwr.write_size,
bwr.write_buffer, bwr.read_size, bwr.read_buffer);
if (bwr.write_size > 0) {
- ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
+ ret = binder_thread_write(proc, thread, (void __user *)(unsigned long)(bwr.write_buffer), bwr.write_size, &bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
@@ -2593,7 +2632,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
if (bwr.read_size > 0) {
- ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
+ ret = binder_thread_read(proc, thread, (void __user *)(unsigned long)(bwr.read_buffer), bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
@@ -2604,7 +2643,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
binder_debug(BINDER_DEBUG_READ_WRITE,
- "%d:%d wrote %ld of %ld, read return %ld of %ld\n",
+ "%d:%d wrote %d of %d, read return %d of %d\n",
proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
bwr.read_consumed, bwr.read_size);
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
@@ -2613,18 +2652,21 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
}
- case BINDER_SET_MAX_THREADS:
+ case _IOC_NR(BINDER_SET_MAX_THREADS):
if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
ret = -EINVAL;
goto err;
}
break;
- case BINDER_SET_CONTEXT_MGR:
+ case _IOC_NR(BINDER_SET_CONTEXT_MGR):
if (binder_context_mgr_node != NULL) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
goto err;
}
+ ret = security_binder_set_context_mgr(proc->tsk);
+ if (ret < 0)
+ goto err;
if (uid_valid(binder_context_mgr_uid)) {
if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
@@ -2635,7 +2677,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
} else
binder_context_mgr_uid = current->cred->euid;
- binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
+ binder_context_mgr_node = binder_new_node(proc, 0, 0);
if (binder_context_mgr_node == NULL) {
ret = -ENOMEM;
goto err;
@@ -2645,14 +2687,15 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
binder_context_mgr_node->has_strong_ref = 1;
binder_context_mgr_node->has_weak_ref = 1;
break;
- case BINDER_THREAD_EXIT:
+ case _IOC_NR(BINDER_THREAD_EXIT):
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
binder_free_thread(proc, thread);
thread = NULL;
break;
- case BINDER_VERSION:
+ case _IOC_NR(BINDER_VERSION):
if (size != sizeof(struct binder_version)) {
+ pr_err("binder: BINDER_VERSION size differs\n");
ret = -EINVAL;
goto err;
}
@@ -2662,6 +2705,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
default:
+ pr_err("binder: IOCTL No. not found\n");
ret = -EINVAL;
goto err;
}
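
The switch above compares only _IOC_NR(cmd), the command-number field of the
ioctl word, because the size field differs between a 32-bit and a 64-bit
userspace whenever long or pointer members are involved. A minimal sketch of
the idea (helper name is illustrative, not part of the patch), assuming
<linux/ioctl.h> and the BINDER_WRITE_READ definition from binder.h below:

    #include <linux/ioctl.h>

    static bool cmd_is_write_read(unsigned int cmd)
    {
            /*
             * _IOC_SIZE(cmd) encodes sizeof() as seen by the caller, so a
             * switch on the full command value would miss requests from a
             * userspace built with different type sizes; the nr field is
             * size-independent. Each case still validates the size itself.
             */
            return _IOC_NR(cmd) == _IOC_NR(BINDER_WRITE_READ);
    }
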
@@ -3086,7 +3130,7 @@ static void print_binder_transaction(struct seq_file *m, const char *prefix,
struct binder_transaction *t)
{
seq_printf(m,
- "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+ "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
@@ -3131,7 +3175,7 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
break;
case BINDER_WORK_NODE:
node = container_of(w, struct binder_node, work);
- seq_printf(m, "%snode work %d: u%p c%p\n",
+ seq_printf(m, "%snode work %d: u%x c%x\n",
prefix, node->debug_id, node->ptr, node->cookie);
break;
case BINDER_WORK_DEAD_BINDER:
@@ -3192,7 +3236,7 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
- seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
+ seq_printf(m, " node %d: u%x c%x hs %d hw %d ls %d lw %d is %d iw %d",
node->debug_id, node->ptr, node->cookie,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
@@ -3495,6 +3539,7 @@ static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
+ .compat_ioctl = binder_ioctl, /* same handler: the UAPI below is now fixed-width */
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
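
Pointing .compat_ioctl at the native handler is safe only because the header
changes that follow rewrite every structure crossing the user/kernel boundary
in fixed-width types, giving identical layout for 32-bit and 64-bit callers.
A hedged sketch of the invariant (example_fixed_abi is hypothetical, not part
of the patch):

    #include <linux/bug.h>
    #include <linux/types.h>

    struct example_fixed_abi {
            __u32 size;     /* was signed long: 4 bytes vs 8 bytes */
            __u32 buffer;   /* was a pointer-sized unsigned long */
    };

    static void example_abi_check(void)
    {
            /* holds on every architecture, which is what makes a single
             * handler serve both native and compat tasks */
            BUILD_BUG_ON(sizeof(struct example_fixed_abi) != 8);
    }
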
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
index dbe81ceca1b..f514f30db1a 100644
--- a/drivers/staging/android/binder.h
+++ b/drivers/staging/android/binder.h
@@ -26,6 +26,8 @@
((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85
+typedef uint32_t userptr32_t;
+
enum {
BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
@@ -48,17 +50,17 @@ enum {
*/
struct flat_binder_object {
/* 8 bytes for large_flat_header. */
- unsigned long type;
- unsigned long flags;
+ uint32_t type;
+ uint32_t flags;
/* 8 bytes of data. */
union {
- void __user *binder; /* local object */
- signed long handle; /* remote object */
+ userptr32_t binder; /* local object */
+ int32_t handle; /* remote object */
};
/* extra data associated with local object */
- void __user *cookie;
+ userptr32_t cookie;
};
/*
@@ -67,18 +69,18 @@ struct flat_binder_object {
*/
struct binder_write_read {
- signed long write_size; /* bytes to write */
- signed long write_consumed; /* bytes consumed by driver */
- unsigned long write_buffer;
- signed long read_size; /* bytes to read */
- signed long read_consumed; /* bytes consumed by driver */
- unsigned long read_buffer;
+ int32_t write_size; /* bytes to write */
+ int32_t write_consumed; /* bytes consumed by driver */
+ uint32_t write_buffer;
+ int32_t read_size; /* bytes to read */
+ int32_t read_consumed; /* bytes consumed by driver */
+ uint32_t read_buffer;
};
/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
/* driver protocol version -- increment with incompatible change */
- signed long protocol_version;
+ int32_t protocol_version;
};
/* This is the current protocol version. */
@@ -86,7 +88,7 @@ struct binder_version {
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
-#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, uint32_t)
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
@@ -119,18 +121,18 @@ struct binder_transaction_data {
* identifying the target and contents of the transaction.
*/
union {
- size_t handle; /* target descriptor of command transaction */
- void *ptr; /* target descriptor of return transaction */
+ uint32_t handle; /* target descriptor of command transaction */
+ userptr32_t ptr; /* target descriptor of return transaction */
} target;
- void *cookie; /* target object cookie */
+ userptr32_t cookie; /* target object cookie */
unsigned int code; /* transaction command */
/* General information about the transaction. */
- unsigned int flags;
+ __u32 flags;
pid_t sender_pid;
uid_t sender_euid;
- size_t data_size; /* number of bytes of data */
- size_t offsets_size; /* number of bytes of offsets */
+ uint32_t data_size; /* number of bytes of data */
+ uint32_t offsets_size; /* number of bytes of offsets */
/* If this transaction is inline, the data immediately
* follows here; otherwise, it ends with a pointer to
@@ -139,32 +141,32 @@ struct binder_transaction_data {
union {
struct {
/* transaction data */
- const void __user *buffer;
+ userptr32_t buffer;
/* offsets from buffer to flat_binder_object structs */
- const void __user *offsets;
+ userptr32_t offsets;
} ptr;
- uint8_t buf[8];
+ __u8 buf[8];
} data;
};
struct binder_ptr_cookie {
- void *ptr;
- void *cookie;
+ userptr32_t ptr;
+ userptr32_t cookie;
};
struct binder_pri_desc {
- int priority;
- int desc;
+ __s32 priority;
+ __u32 desc;
};
struct binder_pri_ptr_cookie {
int priority;
- void *ptr;
- void *cookie;
+ userptr32_t ptr;
+ userptr32_t cookie;
};
enum binder_driver_return_protocol {
- BR_ERROR = _IOR('r', 0, int),
+ BR_ERROR = _IOR('r', 0, __s32),
/*
* int: error code
*/
@@ -178,7 +180,7 @@ enum binder_driver_return_protocol {
* binder_transaction_data: the received command.
*/
- BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+ BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
/*
* not currently supported
* int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
@@ -235,11 +237,11 @@ enum binder_driver_return_protocol {
* stop threadpool thread
*/
- BR_DEAD_BINDER = _IOR('r', 15, void *),
+ BR_DEAD_BINDER = _IOR('r', 15, userptr32_t),
/*
* void *: cookie
*/
- BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
+ BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, userptr32_t),
/*
* void *: cookie
*/
@@ -258,22 +260,22 @@ enum binder_driver_command_protocol {
* binder_transaction_data: the sent command.
*/
- BC_ACQUIRE_RESULT = _IOW('c', 2, int),
+ BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
/*
* not currently supported
* int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
* Else you have acquired a primary reference on the object.
*/
- BC_FREE_BUFFER = _IOW('c', 3, int),
+ BC_FREE_BUFFER = _IOW('c', 3, userptr32_t),
/*
* void *: ptr to transaction data received on a read
*/
- BC_INCREFS = _IOW('c', 4, int),
- BC_ACQUIRE = _IOW('c', 5, int),
- BC_RELEASE = _IOW('c', 6, int),
- BC_DECREFS = _IOW('c', 7, int),
+ BC_INCREFS = _IOW('c', 4, __u32),
+ BC_ACQUIRE = _IOW('c', 5, __u32),
+ BC_RELEASE = _IOW('c', 6, __u32),
+ BC_DECREFS = _IOW('c', 7, __u32),
/*
* int: descriptor
*/
@@ -320,7 +322,7 @@ enum binder_driver_command_protocol {
* void *: cookie
*/
- BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
+ BC_DEAD_BINDER_DONE = _IOW('c', 16, userptr32_t),
/*
* void *: cookie
*/
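
A userptr32_t carries a user pointer across the ABI as a plain 32-bit
integer; before dereferencing it, the driver must widen it back into a
__user pointer, as binder_ioctl does above for the bwr buffers. A minimal
sketch of that round trip (the helper name is illustrative):

    static inline void __user *userptr32_to_user(userptr32_t uptr)
    {
            /* the intermediate unsigned long cast avoids an
             * int-to-pointer-size warning on 64-bit kernels */
            return (void __user *)(unsigned long)uptr;
    }
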
diff --git a/drivers/staging/android/binder_trace.h b/drivers/staging/android/binder_trace.h
index 82a567c2af6..23fa45369a3 100644
--- a/drivers/staging/android/binder_trace.h
+++ b/drivers/staging/android/binder_trace.h
@@ -152,7 +152,7 @@ TRACE_EVENT(binder_transaction_node_to_ref,
TP_STRUCT__entry(
__field(int, debug_id)
__field(int, node_debug_id)
- __field(void __user *, node_ptr)
+ __field(userptr32_t, node_ptr)
__field(int, ref_debug_id)
__field(uint32_t, ref_desc)
),
@@ -163,7 +163,7 @@ TRACE_EVENT(binder_transaction_node_to_ref,
__entry->ref_debug_id = ref->debug_id;
__entry->ref_desc = ref->desc;
),
- TP_printk("transaction=%d node=%d src_ptr=0x%p ==> dest_ref=%d dest_desc=%d",
+ TP_printk("transaction=%d node=%d src_ptr=0x%x ==> dest_ref=%d dest_desc=%d",
__entry->debug_id, __entry->node_debug_id, __entry->node_ptr,
__entry->ref_debug_id, __entry->ref_desc)
);
@@ -177,7 +177,7 @@ TRACE_EVENT(binder_transaction_ref_to_node,
__field(int, ref_debug_id)
__field(uint32_t, ref_desc)
__field(int, node_debug_id)
- __field(void __user *, node_ptr)
+ __field(userptr32_t, node_ptr)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
@@ -186,7 +186,7 @@ TRACE_EVENT(binder_transaction_ref_to_node,
__entry->node_debug_id = ref->node->debug_id;
__entry->node_ptr = ref->node->ptr;
),
- TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%p",
+ TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%x",
__entry->debug_id, __entry->node_debug_id,
__entry->ref_debug_id, __entry->ref_desc, __entry->node_ptr)
);
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
new file mode 100644
index 00000000000..0f8fec1f84e
--- /dev/null
+++ b/drivers/staging/android/ion/Kconfig
@@ -0,0 +1,35 @@
+menuconfig ION
+ bool "Ion Memory Manager"
+ depends on HAVE_MEMBLOCK
+ select GENERIC_ALLOCATOR
+ select DMA_SHARED_BUFFER
+ ---help---
+ Choose this option to enable the ION Memory Manager,
+ used by Android to efficiently allocate buffers
+ from userspace that can be shared between drivers.
+ If you're not using Android it's probably safe to
+ say N here.
+
+config ION_TEST
+ tristate "Ion Test Device"
+ depends on ION
+ help
+ Choose this option to create a device that can be used to test the
+ kernel and device side ION functions.
+
+config ION_DUMMY
+ bool "Dummy Ion driver"
+ depends on ION
+ help
+ Provides a dummy ION driver that registers the
+ /dev/ion device and some basic heaps. This can
+ be used for testing the ION infrastructure if
+ one doesn't have access to hardware drivers that
+ use ION.
+
+config ION_TEGRA
+ tristate "Ion for Tegra"
+ depends on ARCH_TEGRA && ION
+ help
+ Choose this option if you wish to use ion on an NVIDIA Tegra platform.
+
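
For reference, a configuration fragment that exercises the new options on a
system without ION-aware hardware drivers (the values are one plausible
choice, not mandated by the patch):

    CONFIG_ION=y
    CONFIG_ION_TEST=m
    CONFIG_ION_DUMMY=y
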
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
new file mode 100644
index 00000000000..b56fd2bf2b4
--- /dev/null
+++ b/drivers/staging/android/ion/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
+ ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+obj-$(CONFIG_ION_TEST) += ion_test.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_ION) += compat_ion.o
+endif
+
+obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
+obj-$(CONFIG_ION_TEGRA) += tegra/
+
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
new file mode 100644
index 00000000000..e9a8132cd56
--- /dev/null
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -0,0 +1,177 @@
+/*
+ * drivers/staging/android/ion/compat_ion.c
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "ion.h"
+#include "compat_ion.h"
+
+/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
+struct compat_ion_allocation_data {
+ compat_size_t len;
+ compat_size_t align;
+ compat_uint_t heap_id_mask;
+ compat_uint_t flags;
+ compat_int_t handle;
+};
+
+struct compat_ion_custom_data {
+ compat_uint_t cmd;
+ compat_ulong_t arg;
+};
+
+#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct compat_ion_allocation_data)
+#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
+ struct compat_ion_custom_data)
+
+static int compat_get_ion_allocation_data(
+ struct compat_ion_allocation_data __user *data32,
+ struct ion_allocation_data __user *data)
+{
+ compat_size_t s;
+ compat_uint_t u;
+ compat_int_t i;
+ int err;
+
+ err = get_user(s, &data32->len);
+ err |= put_user(s, &data->len);
+ err |= get_user(s, &data32->align);
+ err |= put_user(s, &data->align);
+ err |= get_user(u, &data32->heap_id_mask);
+ err |= put_user(u, &data->heap_id_mask);
+ err |= get_user(u, &data32->flags);
+ err |= put_user(u, &data->flags);
+ err |= get_user(i, &data32->handle);
+ err |= put_user(i, &data->handle);
+
+ return err;
+}
+
+static int compat_put_ion_allocation_data(
+ struct compat_ion_allocation_data __user *data32,
+ struct ion_allocation_data __user *data)
+{
+ compat_size_t s;
+ compat_uint_t u;
+ compat_int_t i;
+ int err;
+
+ err = get_user(s, &data->len);
+ err |= put_user(s, &data32->len);
+ err |= get_user(s, &data->align);
+ err |= put_user(s, &data32->align);
+ err |= get_user(u, &data->heap_id_mask);
+ err |= put_user(u, &data32->heap_id_mask);
+ err |= get_user(u, &data->flags);
+ err |= put_user(u, &data32->flags);
+ err |= get_user(i, &data->handle);
+ err |= put_user(i, &data32->handle);
+
+ return err;
+}
+
+static int compat_get_ion_custom_data(
+ struct compat_ion_custom_data __user *data32,
+ struct ion_custom_data __user *data)
+{
+ compat_uint_t cmd;
+ compat_ulong_t arg;
+ int err;
+
+ err = get_user(cmd, &data32->cmd);
+ err |= put_user(cmd, &data->cmd);
+ err |= get_user(arg, &data32->arg);
+ err |= put_user(arg, &data->arg);
+
+ return err;
+}
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case COMPAT_ION_IOC_ALLOC:
+ {
+ struct compat_ion_allocation_data __user *data32;
+ struct ion_allocation_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_ion_allocation_data(data32, data);
+ if (err)
+ return err;
+ ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
+ (unsigned long)data);
+ err = compat_put_ion_allocation_data(data32, data);
+ return ret ? ret : err;
+ }
+ case COMPAT_ION_IOC_FREE:
+ {
+ struct compat_ion_allocation_data __user *data32;
+ struct ion_allocation_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_ion_allocation_data(data32, data);
+ if (err)
+ return err;
+
+ return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
+ (unsigned long)data);
+ }
+ case COMPAT_ION_IOC_CUSTOM: {
+ struct compat_ion_custom_data __user *data32;
+ struct ion_custom_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_ion_custom_data(data32, data);
+ if (err)
+ return err;
+
+ return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
+ (unsigned long)data);
+ }
+ case ION_IOC_SHARE:
+ case ION_IOC_MAP:
+ case ION_IOC_IMPORT:
+ case ION_IOC_SYNC:
+ return filp->f_op->unlocked_ioctl(filp, cmd,
+ (unsigned long)compat_ptr(arg));
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
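
Each handler above follows the standard compat round trip: widen the packed
32-bit struct field by field into a native struct staged on the user stack
via compat_alloc_user_space(), forward to the native unlocked_ioctl, then
narrow any results back. The generic shape, with hypothetical foo types
standing in for the ion structures:

    static long compat_foo_ioctl(struct file *filp, unsigned long arg)
    {
            struct compat_foo __user *data32 = compat_ptr(arg);
            struct foo __user *data;
            long ret;
            int err;

            data = compat_alloc_user_space(sizeof(*data));
            if (data == NULL)
                    return -EFAULT;

            err = compat_get_foo(data32, data);    /* widen 32-bit fields */
            if (err)
                    return err;
            ret = filp->f_op->unlocked_ioctl(filp, FOO_IOC_CMD,
                                             (unsigned long)data);
            err = compat_put_foo(data32, data);    /* copy results back */
            return ret ? ret : err;
    }
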
diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
new file mode 100644
index 00000000000..3a9c8c08c24
--- /dev/null
+++ b/drivers/staging/android/ion/compat_ion.h
@@ -0,0 +1,30 @@
+/*
+ * drivers/staging/android/ion/compat_ion.h
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_COMPAT_ION_H
+#define _LINUX_COMPAT_ION_H
+
+#if IS_ENABLED(CONFIG_COMPAT)
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#else
+
+#define compat_ion_ioctl NULL
+
+#endif /* CONFIG_COMPAT */
+#endif /* _LINUX_COMPAT_ION_H */
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
new file mode 100644
index 00000000000..48774e3974a
--- /dev/null
+++ b/drivers/staging/android/ion/ion.c
@@ -0,0 +1,1634 @@
+/*
+ * drivers/staging/android/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/miscdevice.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/idr.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev: the actual misc device
+ * @buffers: an rb tree of all the existing buffers
+ * @buffer_lock: lock protecting the tree of buffers
+ * @lock: rwsem protecting the tree of heaps and clients
+ * @heaps: list of all the heaps in the system
+ * @custom_ioctl: platform-specific ioctl hook, may be NULL
+ * @clients: rb tree of all the clients, kernel and userspace
+ */
+struct ion_device {
+ struct miscdevice dev;
+ struct rb_root buffers;
+ struct mutex buffer_lock;
+ struct rw_semaphore lock;
+ struct plist_head heaps;
+ long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
+ unsigned long arg);
+ struct rb_root clients;
+ struct dentry *debug_root;
+ struct dentry *heaps_debug_root;
+ struct dentry *clients_debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node: node in the tree of all clients
+ * @dev: backpointer to ion device
+ * @handles: an rb tree of all the handles in this client
+ * @idr: an idr space for allocating handle ids
+ * @lock: lock protecting the tree of handles
+ * @name: used for debugging
+ * @display_name: used for debugging (unique version of @name)
+ * @display_serial: used for debugging (to make display_name unique)
+ * @task: used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both the tree of handles
+ * and the handles themselves; hold it while modifying either.
+ */
+struct ion_client {
+ struct rb_node node;
+ struct ion_device *dev;
+ struct rb_root handles;
+ struct idr idr;
+ struct mutex lock;
+ const char *name;
+ char *display_name;
+ int display_serial;
+ struct task_struct *task;
+ pid_t pid;
+ struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref: reference count
+ * @client: back pointer to the client the buffer resides in
+ * @buffer: pointer to the buffer
+ * @node: node in the client's handle rbtree
+ * @kmap_cnt: count of times this client has mapped to kernel
+ * @id: client-unique id allocated by client->idr
+ *
+ * Modifications to node and kmap_cnt should be protected by the
+ * lock in the client. Other fields are never changed after initialization.
+ */
+struct ion_handle {
+ struct kref ref;
+ struct ion_client *client;
+ struct ion_buffer *buffer;
+ struct rb_node node;
+ unsigned int kmap_cnt;
+ int id;
+};
+
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+ return (buffer->flags & ION_FLAG_CACHED) &&
+ !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
+}
+
+bool ion_buffer_cached(struct ion_buffer *buffer)
+{
+ return !!(buffer->flags & ION_FLAG_CACHED);
+}
+
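+/*
+ * The low bit of each buffer->pages[] entry doubles as a per-page dirty
+ * flag: ion_buffer_page() masks it off to recover the real struct page
+ * pointer, while the helpers below test, set, and clear the bit.
+ */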
+static inline struct page *ion_buffer_page(struct page *page)
+{
+ return (struct page *)((unsigned long)page & ~(1UL));
+}
+
+static inline bool ion_buffer_page_is_dirty(struct page *page)
+{
+ return !!((unsigned long)page & 1UL);
+}
+
+static inline void ion_buffer_page_dirty(struct page **page)
+{
+ *page = (struct page *)((unsigned long)(*page) | 1UL);
+}
+
+static inline void ion_buffer_page_clean(struct page **page)
+{
+ *page = (struct page *)((unsigned long)(*page) & ~(1UL));
+}
+
+/* this function should only be called while dev->lock is held */
+static void ion_buffer_add(struct ion_device *dev,
+ struct ion_buffer *buffer)
+{
+ struct rb_node **p = &dev->buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_buffer *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_buffer, node);
+
+ if (buffer < entry) {
+ p = &(*p)->rb_left;
+ } else if (buffer > entry) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: buffer already found.", __func__);
+ BUG();
+ }
+ }
+
+ rb_link_node(&buffer->node, parent, p);
+ rb_insert_color(&buffer->node, &dev->buffers);
+}
+
+/* this function should only be called while dev->lock is held */
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+ struct ion_device *dev,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
+{
+ struct ion_buffer *buffer;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i, ret;
+
+ buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ buffer->heap = heap;
+ buffer->flags = flags;
+ kref_init(&buffer->ref);
+
+ ret = heap->ops->allocate(heap, buffer, len, align, flags);
+
+ if (ret) {
+ if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+ goto err2;
+
+ ion_heap_freelist_drain(heap, 0);
+ ret = heap->ops->allocate(heap, buffer, len, align,
+ flags);
+ if (ret)
+ goto err2;
+ }
+
+ buffer->dev = dev;
+ buffer->size = len;
+
+ table = heap->ops->map_dma(heap, buffer);
+ if (WARN_ONCE(table == NULL,
+ "heap->ops->map_dma should return ERR_PTR on error"))
+ table = ERR_PTR(-EINVAL);
+ if (IS_ERR(table)) {
+ heap->ops->free(buffer);
+ kfree(buffer);
+ return ERR_PTR(PTR_ERR(table));
+ }
+ buffer->sg_table = table;
+ if (ion_buffer_fault_user_mappings(buffer)) {
+ int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct scatterlist *sg;
+ int i, j, k = 0;
+
+ buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
+ if (!buffer->pages) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+
+ for (j = 0; j < sg->length / PAGE_SIZE; j++)
+ buffer->pages[k++] = page++;
+ }
+
+ if (ret)
+ goto err;
+ }
+
+ buffer->dev = dev;
+ buffer->size = len;
+ INIT_LIST_HEAD(&buffer->vmas);
+ mutex_init(&buffer->lock);
+ /* this will set up dma addresses for the sglist -- it is not
+ technically correct as per the dma api -- a specific
+ device isn't really taking ownership here. However, in practice on
+ our systems the only dma_address space is physical addresses.
+ Additionally, we can't afford the overhead of invalidating every
+ allocation via dma_map_sg. The implicit contract here is that
+ memory coming from the heaps is ready for DMA, i.e. if it has a
+ cached mapping that mapping has been invalidated */
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+ sg_dma_address(sg) = sg_phys(sg);
+ mutex_lock(&dev->buffer_lock);
+ ion_buffer_add(dev, buffer);
+ mutex_unlock(&dev->buffer_lock);
+ return buffer;
+
+err:
+ heap->ops->unmap_dma(heap, buffer);
+ heap->ops->free(buffer);
+err1:
+ if (buffer->pages)
+ vfree(buffer->pages);
+err2:
+ kfree(buffer);
+ return ERR_PTR(ret);
+}
+
+void ion_buffer_destroy(struct ion_buffer *buffer)
+{
+ if (WARN_ON(buffer->kmap_cnt > 0))
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+ buffer->heap->ops->free(buffer);
+ if (buffer->pages)
+ vfree(buffer->pages);
+ kfree(buffer);
+}
+
+static void _ion_buffer_destroy(struct kref *kref)
+{
+ struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+ struct ion_heap *heap = buffer->heap;
+ struct ion_device *dev = buffer->dev;
+
+ mutex_lock(&dev->buffer_lock);
+ rb_erase(&buffer->node, &dev->buffers);
+ mutex_unlock(&dev->buffer_lock);
+
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ ion_heap_freelist_add(heap, buffer);
+ else
+ ion_buffer_destroy(buffer);
+}
+
+static void ion_buffer_get(struct ion_buffer *buffer)
+{
+ kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+ return kref_put(&buffer->ref, _ion_buffer_destroy);
+}
+
+static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
+{
+ mutex_lock(&buffer->lock);
+ buffer->handle_count++;
+ mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
+{
+ /*
+ * when a buffer is removed from a handle, if it is not in
+ * any other handles, copy the taskcomm and the pid of the
+ * process it's being removed from into the buffer. At this
+ * point there will be no way to track what processes this buffer is
+ * being used by, it only exists as a dma_buf file descriptor.
+ * The taskcomm and pid can provide a debug hint as to where this fd
+ * is in the system
+ */
+ mutex_lock(&buffer->lock);
+ buffer->handle_count--;
+ BUG_ON(buffer->handle_count < 0);
+ if (!buffer->handle_count) {
+ struct task_struct *task;
+
+ task = current->group_leader;
+ get_task_comm(buffer->task_comm, task);
+ buffer->pid = task_pid_nr(task);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static struct ion_handle *ion_handle_create(struct ion_client *client,
+ struct ion_buffer *buffer)
+{
+ struct ion_handle *handle;
+
+ handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+ kref_init(&handle->ref);
+ RB_CLEAR_NODE(&handle->node);
+ handle->client = client;
+ ion_buffer_get(buffer);
+ ion_buffer_add_to_handle(buffer);
+ handle->buffer = buffer;
+
+ return handle;
+}
+
+static void ion_handle_kmap_put(struct ion_handle *);
+
+static void ion_handle_destroy(struct kref *kref)
+{
+ struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+ struct ion_client *client = handle->client;
+ struct ion_buffer *buffer = handle->buffer;
+
+ mutex_lock(&buffer->lock);
+ while (handle->kmap_cnt)
+ ion_handle_kmap_put(handle);
+ mutex_unlock(&buffer->lock);
+
+ idr_remove(&client->idr, handle->id);
+ if (!RB_EMPTY_NODE(&handle->node))
+ rb_erase(&handle->node, &client->handles);
+
+ ion_buffer_remove_from_handle(buffer);
+ ion_buffer_put(buffer);
+
+ kfree(handle);
+}
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+ return handle->buffer;
+}
+
+static void ion_handle_get(struct ion_handle *handle)
+{
+ kref_get(&handle->ref);
+}
+
+static int ion_handle_put(struct ion_handle *handle)
+{
+ struct ion_client *client = handle->client;
+ int ret;
+
+ mutex_lock(&client->lock);
+ ret = kref_put(&handle->ref, ion_handle_destroy);
+ mutex_unlock(&client->lock);
+
+ return ret;
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+ struct ion_buffer *buffer)
+{
+ struct rb_node *n = client->handles.rb_node;
+
+ while (n) {
+ struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+ if (buffer < entry->buffer)
+ n = n->rb_left;
+ else if (buffer > entry->buffer)
+ n = n->rb_right;
+ else
+ return entry;
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+ int id)
+{
+ struct ion_handle *handle;
+
+ mutex_lock(&client->lock);
+ handle = idr_find(&client->idr, id);
+ if (handle)
+ ion_handle_get(handle);
+ mutex_unlock(&client->lock);
+
+ return handle ? handle : ERR_PTR(-EINVAL);
+}
+
+static bool ion_handle_validate(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ WARN_ON(!mutex_is_locked(&client->lock));
+ return (idr_find(&client->idr, handle->id) == handle);
+}
+
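+/*
+ * Handles are indexed two ways: by client-local id in client->idr (the
+ * value handed to userspace) and by buffer pointer in client->handles,
+ * which lets ion_import_dma_buf() find and reuse an existing handle.
+ */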
+static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+ int id;
+ struct rb_node **p = &client->handles.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_handle *entry;
+
+ id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ handle->id = id;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_handle, node);
+
+ if (handle->buffer < entry->buffer)
+ p = &(*p)->rb_left;
+ else if (handle->buffer > entry->buffer)
+ p = &(*p)->rb_right;
+ else
+ WARN(1, "%s: buffer already found.", __func__);
+ }
+
+ rb_link_node(&handle->node, parent, p);
+ rb_insert_color(&handle->node, &client->handles);
+
+ return 0;
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int heap_id_mask,
+ unsigned int flags)
+{
+ struct ion_handle *handle;
+ struct ion_device *dev = client->dev;
+ struct ion_buffer *buffer = NULL;
+ struct ion_heap *heap;
+ int ret;
+
+ pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
+ len, align, heap_id_mask, flags);
+ /*
+ * traverse the list of heaps available in this system in priority
+ * order. If the heap type is supported by the client, and matches the
+ * request of the caller allocate from it. Repeat until allocate has
+ * succeeded or all heaps have been tried
+ */
+ len = PAGE_ALIGN(len);
+
+ if (!len)
+ return ERR_PTR(-EINVAL);
+
+ down_read(&dev->lock);
+ plist_for_each_entry(heap, &dev->heaps, node) {
+ /* if the caller didn't specify this heap id */
+ if (!((1 << heap->id) & heap_id_mask))
+ continue;
+ buffer = ion_buffer_create(heap, dev, len, align, flags);
+ if (!IS_ERR(buffer))
+ break;
+ }
+ up_read(&dev->lock);
+
+ if (buffer == NULL)
+ return ERR_PTR(-ENODEV);
+
+ if (IS_ERR(buffer))
+ return ERR_PTR(PTR_ERR(buffer));
+
+ handle = ion_handle_create(client, buffer);
+
+ /*
+ * ion_buffer_create will create a buffer with a ref_cnt of 1,
+ * and ion_handle_create will take a second reference, drop one here
+ */
+ ion_buffer_put(buffer);
+
+ if (IS_ERR(handle))
+ return handle;
+
+ mutex_lock(&client->lock);
+ ret = ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ if (ret) {
+ ion_handle_put(handle);
+ handle = ERR_PTR(ret);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL(ion_alloc);
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+ bool valid_handle;
+
+ BUG_ON(client != handle->client);
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to free.\n", __func__);
+ mutex_unlock(&client->lock);
+ return;
+ }
+ mutex_unlock(&client->lock);
+ ion_handle_put(handle);
+}
+EXPORT_SYMBOL(ion_free);
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct ion_buffer *buffer;
+ int ret;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+
+ if (!buffer->heap->ops->phys) {
+ pr_err("%s: ion_phys is not implemented by this heap.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return -ENODEV;
+ }
+ mutex_unlock(&client->lock);
+ ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+ return ret;
+}
+EXPORT_SYMBOL(ion_phys);
+
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+{
+ void *vaddr;
+
+ if (buffer->kmap_cnt) {
+ buffer->kmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+ if (WARN_ONCE(vaddr == NULL,
+ "heap->ops->map_kernel should return ERR_PTR on error"))
+ return ERR_PTR(-EINVAL);
+ if (IS_ERR(vaddr))
+ return vaddr;
+ buffer->vaddr = vaddr;
+ buffer->kmap_cnt++;
+ return vaddr;
+}
+
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+ void *vaddr;
+
+ if (handle->kmap_cnt) {
+ handle->kmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = ion_buffer_kmap_get(buffer);
+ if (IS_ERR(vaddr))
+ return vaddr;
+ handle->kmap_cnt++;
+ return vaddr;
+}
+
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+ buffer->kmap_cnt--;
+ if (!buffer->kmap_cnt) {
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->vaddr = NULL;
+ }
+}
+
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+
+ handle->kmap_cnt--;
+ if (!handle->kmap_cnt)
+ ion_buffer_kmap_put(buffer);
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ void *vaddr;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_kernel.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+
+ buffer = handle->buffer;
+
+ if (!handle->buffer->heap->ops->map_kernel) {
+ pr_err("%s: map_kernel is not implemented by this heap.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&buffer->lock);
+ vaddr = ion_handle_kmap_get(handle);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+ return vaddr;
+}
+EXPORT_SYMBOL(ion_map_kernel);
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+
+ mutex_lock(&client->lock);
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+ ion_handle_kmap_put(handle);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_unmap_kernel);
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+ struct ion_client *client = s->private;
+ struct rb_node *n;
+ size_t sizes[ION_NUM_HEAP_IDS] = {0};
+ const char *names[ION_NUM_HEAP_IDS] = {NULL};
+ int i;
+
+ mutex_lock(&client->lock);
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ unsigned int id = handle->buffer->heap->id;
+
+ if (!names[id])
+ names[id] = handle->buffer->heap->name;
+ sizes[id] += handle->buffer->size;
+ }
+ mutex_unlock(&client->lock);
+
+ seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+ for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
+ if (!names[i])
+ continue;
+ seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
+ }
+ return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+ .open = ion_debug_client_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
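+/*
+ * Pick the next per-name serial so debugfs entries for clients that
+ * share a name ("surfaceflinger-0", "surfaceflinger-1", ...) stay unique.
+ */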
+static int ion_get_client_serial(const struct rb_root *root,
+ const unsigned char *name)
+{
+ int serial = -1;
+ struct rb_node *node;
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ struct ion_client *client = rb_entry(node, struct ion_client,
+ node);
+ if (strcmp(client->name, name))
+ continue;
+ serial = max(serial, client->display_serial);
+ }
+ return serial + 1;
+}
+
+struct ion_client *ion_client_create(struct ion_device *dev,
+ const char *name)
+{
+ struct ion_client *client;
+ struct task_struct *task;
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct ion_client *entry;
+ pid_t pid;
+
+ if (!name) {
+ pr_err("%s: Name cannot be null\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ get_task_struct(current->group_leader);
+ task_lock(current->group_leader);
+ pid = task_pid_nr(current->group_leader);
+ /* don't bother to store task struct for kernel threads,
+ they can't be killed anyway */
+ if (current->group_leader->flags & PF_KTHREAD) {
+ put_task_struct(current->group_leader);
+ task = NULL;
+ } else {
+ task = current->group_leader;
+ }
+ task_unlock(current->group_leader);
+
+ client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+ if (!client)
+ goto err_put_task_struct;
+
+ client->dev = dev;
+ client->handles = RB_ROOT;
+ idr_init(&client->idr);
+ mutex_init(&client->lock);
+ client->task = task;
+ client->pid = pid;
+ client->name = kstrdup(name, GFP_KERNEL);
+ if (!client->name)
+ goto err_free_client;
+
+ down_write(&dev->lock);
+ client->display_serial = ion_get_client_serial(&dev->clients, name);
+ client->display_name = kasprintf(
+ GFP_KERNEL, "%s-%d", name, client->display_serial);
+ if (!client->display_name) {
+ up_write(&dev->lock);
+ goto err_free_client_name;
+ }
+ p = &dev->clients.rb_node;
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_client, node);
+
+ if (client < entry)
+ p = &(*p)->rb_left;
+ else if (client > entry)
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&client->node, parent, p);
+ rb_insert_color(&client->node, &dev->clients);
+
+ client->debug_root = debugfs_create_file(client->display_name, 0664,
+ dev->clients_debug_root,
+ client, &debug_client_fops);
+ if (!client->debug_root) {
+ char buf[256], *path;
+ path = dentry_path(dev->clients_debug_root, buf, 256);
+ pr_err("Failed to create client debugfs at %s/%s\n",
+ path, client->display_name);
+ }
+
+ up_write(&dev->lock);
+
+ return client;
+
+err_free_client_name:
+ kfree(client->name);
+err_free_client:
+ kfree(client);
+err_put_task_struct:
+ if (task)
+ put_task_struct(current->group_leader);
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(ion_client_create);
+
+void ion_client_destroy(struct ion_client *client)
+{
+ struct ion_device *dev = client->dev;
+ struct rb_node *n;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ while ((n = rb_first(&client->handles))) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ ion_handle_destroy(&handle->ref);
+ }
+
+ idr_destroy(&client->idr);
+
+ down_write(&dev->lock);
+ if (client->task)
+ put_task_struct(client->task);
+ rb_erase(&client->node, &dev->clients);
+ debugfs_remove_recursive(client->debug_root);
+ up_write(&dev->lock);
+
+ kfree(client->display_name);
+ kfree(client->name);
+ kfree(client);
+}
+EXPORT_SYMBOL(ion_client_destroy);
+
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct sg_table *table;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_dma.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ table = buffer->sg_table;
+ mutex_unlock(&client->lock);
+ return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+ struct device *dev,
+ enum dma_data_direction direction);
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_buf *dmabuf = attachment->dmabuf;
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+ return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+}
+
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+ size_t size, enum dma_data_direction dir)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, size, 0);
+ /*
+ * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+ * for the targeted device, but this works on the currently targeted
+ * hardware.
+ */
+ sg_dma_address(&sg) = page_to_phys(page);
+ dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
+struct ion_vma_list {
+ struct list_head list;
+ struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+ struct device *dev,
+ enum dma_data_direction dir)
+{
+ struct ion_vma_list *vma_list;
+ int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ int i;
+
+ pr_debug("%s: syncing for device %s\n", __func__,
+ dev ? dev_name(dev) : "null");
+
+ if (!ion_buffer_fault_user_mappings(buffer))
+ return;
+
+ mutex_lock(&buffer->lock);
+ for (i = 0; i < pages; i++) {
+ struct page *page = buffer->pages[i];
+
+ if (ion_buffer_page_is_dirty(page))
+ ion_pages_sync_for_device(dev, ion_buffer_page(page),
+ PAGE_SIZE, dir);
+
+ ion_buffer_page_clean(buffer->pages + i);
+ }
+ list_for_each_entry(vma_list, &buffer->vmas, list) {
+ struct vm_area_struct *vma = vma_list->vma;
+
+ zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
+ NULL);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ unsigned long pfn;
+ int ret;
+
+ mutex_lock(&buffer->lock);
+ ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
+ BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
+
+ pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
+ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ mutex_unlock(&buffer->lock);
+ if (ret)
+ return VM_FAULT_ERROR;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_vma_list *vma_list;
+
+ vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+ if (!vma_list)
+ return;
+ vma_list->vma = vma;
+ mutex_lock(&buffer->lock);
+ list_add(&vma_list->list, &buffer->vmas);
+ mutex_unlock(&buffer->lock);
+ pr_debug("%s: adding %p\n", __func__, vma);
+}
+
+static void ion_vm_close(struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_vma_list *vma_list, *tmp;
+
+ pr_debug("%s\n", __func__);
+ mutex_lock(&buffer->lock);
+ list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+ if (vma_list->vma != vma)
+ continue;
+ list_del(&vma_list->list);
+ kfree(vma_list);
+ pr_debug("%s: deleting %p\n", __func__, vma);
+ break;
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static struct vm_operations_struct ion_vma_ops = {
+ .open = ion_vm_open,
+ .close = ion_vm_close,
+ .fault = ion_vm_fault,
+};
+
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ int ret = 0;
+
+ if (!buffer->heap->ops->map_user) {
+ pr_err("%s: this heap does not define a method for mapping "
+ "to userspace\n", __func__);
+ return -EINVAL;
+ }
+
+ if (ion_buffer_fault_user_mappings(buffer)) {
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
+ VM_DONTDUMP;
+ vma->vm_private_data = buffer;
+ vma->vm_ops = &ion_vma_ops;
+ ion_vm_open(vma);
+ return 0;
+ }
+
+ if (!(buffer->flags & ION_FLAG_CACHED))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ mutex_lock(&buffer->lock);
+ /* now map it to userspace */
+ ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+ mutex_unlock(&buffer->lock);
+
+ if (ret)
+ pr_err("%s: failure mapping buffer to userspace\n",
+ __func__);
+
+ return ret;
+}
+
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ ion_buffer_put(buffer);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ return buffer->vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+ void *ptr)
+{
+ return;
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+
+ if (!buffer->heap->ops->map_kernel) {
+ pr_err("%s: map kernel is not implemented by this heap.\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&buffer->lock);
+ vaddr = ion_buffer_kmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+ return 0;
+}
+
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+}
+
+static struct dma_buf_ops dma_buf_ops = {
+ .map_dma_buf = ion_map_dma_buf,
+ .unmap_dma_buf = ion_unmap_dma_buf,
+ .mmap = ion_mmap,
+ .release = ion_dma_buf_release,
+ .begin_cpu_access = ion_dma_buf_begin_cpu_access,
+ .end_cpu_access = ion_dma_buf_end_cpu_access,
+ .kmap_atomic = ion_dma_buf_kmap,
+ .kunmap_atomic = ion_dma_buf_kunmap,
+ .kmap = ion_dma_buf_kmap,
+ .kunmap = ion_dma_buf_kunmap,
+};
+
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct dma_buf *dmabuf;
+ bool valid_handle;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to share.\n", __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ ion_buffer_get(buffer);
+ mutex_unlock(&client->lock);
+
+ dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+ if (IS_ERR(dmabuf)) {
+ ion_buffer_put(buffer);
+ return dmabuf;
+ }
+
+ return dmabuf;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
+
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+{
+ struct dma_buf *dmabuf;
+ int fd;
+
+ dmabuf = ion_share_dma_buf(client, handle);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+ if (fd < 0)
+ dma_buf_put(dmabuf);
+
+ return fd;
+}
+EXPORT_SYMBOL(ion_share_dma_buf_fd);
+
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+ struct ion_handle *handle;
+ int ret;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return ERR_PTR(PTR_ERR(dmabuf));
+ /* if this memory came from ion */
+
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not import dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = dmabuf->priv;
+
+ mutex_lock(&client->lock);
+ /* if a handle exists for this buffer just take a reference to it */
+ handle = ion_handle_lookup(client, buffer);
+ if (!IS_ERR(handle)) {
+ ion_handle_get(handle);
+ mutex_unlock(&client->lock);
+ goto end;
+ }
+ mutex_unlock(&client->lock);
+
+ handle = ion_handle_create(client, buffer);
+ if (IS_ERR(handle))
+ goto end;
+
+ mutex_lock(&client->lock);
+ ret = ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ if (ret) {
+ ion_handle_put(handle);
+ handle = ERR_PTR(ret);
+ }
+
+end:
+ dma_buf_put(dmabuf);
+ return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf);
+
+static int ion_sync_for_device(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ /* if this memory came from ion */
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not sync dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return -EINVAL;
+ }
+ buffer = dmabuf->priv;
+
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+ dma_buf_put(dmabuf);
+ return 0;
+}
+
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+ switch (cmd) {
+ case ION_IOC_SYNC:
+ case ION_IOC_FREE:
+ case ION_IOC_CUSTOM:
+ return _IOC_WRITE;
+ default:
+ return _IOC_DIR(cmd);
+ }
+}
+
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct ion_client *client = filp->private_data;
+ struct ion_device *dev = client->dev;
+ struct ion_handle *cleanup_handle = NULL;
+ int ret = 0;
+ unsigned int dir;
+
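+ /*
+ * Stage the user argument in a union sized for the largest ioctl
+ * payload; _IOC_SIZE(cmd) bounds the copy in both directions.
+ */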
+ union {
+ struct ion_fd_data fd;
+ struct ion_allocation_data allocation;
+ struct ion_handle_data handle;
+ struct ion_custom_data custom;
+ } data;
+
+ dir = ion_ioctl_dir(cmd);
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ if (dir & _IOC_WRITE)
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ION_IOC_ALLOC:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_alloc(client, data.allocation.len,
+ data.allocation.align,
+ data.allocation.heap_id_mask,
+ data.allocation.flags);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ data.allocation.handle = handle->id;
+
+ cleanup_handle = handle;
+ break;
+ }
+ case ION_IOC_FREE:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_handle_get_by_id(client, data.handle.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ ion_free(client, handle);
+ ion_handle_put(handle);
+ break;
+ }
+ case ION_IOC_SHARE:
+ case ION_IOC_MAP:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_handle_get_by_id(client, data.handle.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ data.fd.fd = ion_share_dma_buf_fd(client, handle);
+ ion_handle_put(handle);
+ if (data.fd.fd < 0)
+ ret = data.fd.fd;
+ break;
+ }
+ case ION_IOC_IMPORT:
+ {
+ struct ion_handle *handle;
+ handle = ion_import_dma_buf(client, data.fd.fd);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
+ else
+ data.handle.handle = handle->id;
+ break;
+ }
+ case ION_IOC_SYNC:
+ {
+ ret = ion_sync_for_device(client, data.fd.fd);
+ break;
+ }
+ case ION_IOC_CUSTOM:
+ {
+ if (!dev->custom_ioctl)
+ return -ENOTTY;
+ ret = dev->custom_ioctl(client, data.custom.cmd,
+ data.custom.arg);
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ if (dir & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+ if (cleanup_handle)
+ ion_free(client, cleanup_handle);
+ return -EFAULT;
+ }
+ }
+ return ret;
+}
+
+static int ion_release(struct inode *inode, struct file *file)
+{
+ struct ion_client *client = file->private_data;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ ion_client_destroy(client);
+ return 0;
+}
+
+static int ion_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+ struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
+ struct ion_client *client;
+ char debug_name[64];
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
+ client = ion_client_create(dev, debug_name);
+ if (IS_ERR(client))
+ return PTR_ERR(client);
+ file->private_data = client;
+
+ return 0;
+}
+
+static const struct file_operations ion_fops = {
+ .owner = THIS_MODULE,
+ .open = ion_open,
+ .release = ion_release,
+ .unlocked_ioctl = ion_ioctl,
+ .compat_ioctl = compat_ion_ioctl,
+};
+
+static size_t ion_debug_heap_total(struct ion_client *client,
+ unsigned int id)
+{
+ size_t size = 0;
+ struct rb_node *n;
+
+ mutex_lock(&client->lock);
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n,
+ struct ion_handle,
+ node);
+ if (handle->buffer->heap->id == id)
+ size += handle->buffer->size;
+ }
+ mutex_unlock(&client->lock);
+ return size;
+}
+
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+ struct ion_heap *heap = s->private;
+ struct ion_device *dev = heap->dev;
+ struct rb_node *n;
+ size_t total_size = 0;
+ size_t total_orphaned_size = 0;
+
+ seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
+ seq_printf(s, "----------------------------------------------------\n");
+
+ for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+ struct ion_client *client = rb_entry(n, struct ion_client,
+ node);
+ size_t size = ion_debug_heap_total(client, heap->id);
+ if (!size)
+ continue;
+ if (client->task) {
+ char task_comm[TASK_COMM_LEN];
+
+ get_task_comm(task_comm, client->task);
+ seq_printf(s, "%16.s %16u %16zu\n", task_comm,
+ client->pid, size);
+ } else {
+ seq_printf(s, "%16.s %16u %16zu\n", client->name,
+ client->pid, size);
+ }
+ }
+ seq_printf(s, "----------------------------------------------------\n");
+ seq_printf(s, "orphaned allocations (info is from last known client):"
+ "\n");
+ mutex_lock(&dev->buffer_lock);
+ for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+ struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+ node);
+ if (buffer->heap->id != heap->id)
+ continue;
+ total_size += buffer->size;
+ if (!buffer->handle_count) {
+ seq_printf(s, "%16.s %16u %16zu %d %d\n",
+ buffer->task_comm, buffer->pid,
+ buffer->size, buffer->kmap_cnt,
+ atomic_read(&buffer->ref.refcount));
+ total_orphaned_size += buffer->size;
+ }
+ }
+ mutex_unlock(&dev->buffer_lock);
+ seq_printf(s, "----------------------------------------------------\n");
+ seq_printf(s, "%16.s %16zu\n", "total orphaned",
+ total_orphaned_size);
+ seq_printf(s, "%16.s %16zu\n", "total ", total_size);
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ seq_printf(s, "%16.s %16zu\n", "deferred free",
+ heap->free_list_size);
+ seq_printf(s, "----------------------------------------------------\n");
+
+ if (heap->debug_show)
+ heap->debug_show(heap, s, unused);
+
+ return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+ .open = ion_debug_heap_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#ifdef DEBUG_HEAP_SHRINKER
+static int debug_shrink_set(void *data, u64 val)
+{
+ struct ion_heap *heap = data;
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ if (!val)
+ return 0;
+
+ objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ sc.nr_to_scan = objs;
+
+ heap->shrinker.shrink(&heap->shrinker, &sc);
+ return 0;
+}
+
+static int debug_shrink_get(void *data, u64 *val)
+{
+ struct ion_heap *heap = data;
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ *val = objs;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
+ debug_shrink_set, "%llu\n");
+#endif
+
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+{
+ struct dentry *debug_file;
+
+ if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+ !heap->ops->unmap_dma)
+ pr_err("%s: can not add heap with invalid ops struct.\n",
+ __func__);
+
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ ion_heap_init_deferred_free(heap);
+
+ if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
+ ion_heap_init_shrinker(heap);
+
+ heap->dev = dev;
+ down_write(&dev->lock);
+ /* use negative heap->id to reverse the priority -- when traversing
+ the list later attempt higher id numbers first */
+ plist_node_init(&heap->node, -heap->id);
+ plist_add(&heap->node, &dev->heaps);
+ debug_file = debugfs_create_file(heap->name, 0664,
+ dev->heaps_debug_root, heap,
+ &debug_heap_fops);
+
+ if (!debug_file) {
+ char buf[256], *path;
+ path = dentry_path(dev->heaps_debug_root, buf, 256);
+ pr_err("Failed to create heap debugfs at %s/%s\n",
+ path, heap->name);
+ }
+
+#ifdef DEBUG_HEAP_SHRINKER
+ if (heap->shrinker.shrink) {
+ char debug_name[64];
+
+ snprintf(debug_name, 64, "%s_shrink", heap->name);
+ debug_file = debugfs_create_file(
+ debug_name, 0644, dev->heaps_debug_root, heap,
+ &debug_shrink_fops);
+ if (!debug_file) {
+ char buf[256], *path;
+ path = dentry_path(dev->heaps_debug_root, buf, 256);
+ pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
+ path, debug_name);
+ }
+ }
+#endif
+ up_write(&dev->lock);
+}
+
+struct ion_device *ion_device_create(long (*custom_ioctl)
+ (struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg))
+{
+ struct ion_device *idev;
+ int ret;
+
+ idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+ if (!idev)
+ return ERR_PTR(-ENOMEM);
+
+ idev->dev.minor = MISC_DYNAMIC_MINOR;
+ idev->dev.name = "ion";
+ idev->dev.fops = &ion_fops;
+ idev->dev.parent = NULL;
+	ret = misc_register(&idev->dev);
+	if (ret) {
+		pr_err("ion: failed to register misc device.\n");
+		kfree(idev);
+		return ERR_PTR(ret);
+	}
+
+ idev->debug_root = debugfs_create_dir("ion", NULL);
+ if (!idev->debug_root) {
+ pr_err("ion: failed to create debugfs root directory.\n");
+ goto debugfs_done;
+ }
+ idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
+ if (!idev->heaps_debug_root) {
+ pr_err("ion: failed to create debugfs heaps directory.\n");
+ goto debugfs_done;
+ }
+ idev->clients_debug_root = debugfs_create_dir("clients",
+ idev->debug_root);
+ if (!idev->clients_debug_root)
+ pr_err("ion: failed to create debugfs clients directory.\n");
+
+debugfs_done:
+
+ idev->custom_ioctl = custom_ioctl;
+ idev->buffers = RB_ROOT;
+ mutex_init(&idev->buffer_lock);
+ init_rwsem(&idev->lock);
+ plist_head_init(&idev->heaps);
+ idev->clients = RB_ROOT;
+ return idev;
+}
+
+void ion_device_destroy(struct ion_device *dev)
+{
+ misc_deregister(&dev->dev);
+ debugfs_remove_recursive(dev->debug_root);
+ /* XXX need to free the heaps and clients ? */
+ kfree(dev);
+}
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+ int i;
+
+ for (i = 0; i < data->nr; i++) {
+ if (data->heaps[i].size == 0)
+ continue;
+
+ if (data->heaps[i].base == 0) {
+ phys_addr_t paddr;
+ paddr = memblock_alloc_base(data->heaps[i].size,
+ data->heaps[i].align,
+ MEMBLOCK_ALLOC_ANYWHERE);
+ if (!paddr) {
+ pr_err("%s: error allocating memblock for "
+ "heap %d\n",
+ __func__, i);
+ continue;
+ }
+ data->heaps[i].base = paddr;
+ } else {
+ int ret = memblock_reserve(data->heaps[i].base,
+ data->heaps[i].size);
+ if (ret)
+ pr_err("memblock reserve of %zx@%lx failed\n",
+ data->heaps[i].size,
+ data->heaps[i].base);
+ }
+ pr_info("%s: %s reserved base %lx size %zu\n", __func__,
+ data->heaps[i].name,
+ data->heaps[i].base,
+ data->heaps[i].size);
+ }
+}
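+
+/*
+ * Illustrative board-file usage of ion_reserve() (a sketch; the heap
+ * array, id and sizes are assumptions): reserve carveout memory early,
+ * before the page allocator claims it, then hand the same data to the
+ * ion platform device at probe time.
+ *
+ *	static struct ion_platform_heap example_heaps[] = {
+ *		{
+ *			.id    = 0,
+ *			.type  = ION_HEAP_TYPE_CARVEOUT,
+ *			.name  = "carveout",
+ *			.base  = 0,	// 0 lets memblock pick an address
+ *			.size  = SZ_8M,
+ *			.align = SZ_1M,
+ *		},
+ *	};
+ *
+ *	static struct ion_platform_data example_pdata = {
+ *		.nr    = ARRAY_SIZE(example_heaps),
+ *		.heaps = example_heaps,
+ *	};
+ *
+ *	void __init example_board_reserve(void)
+ *	{
+ *		ion_reserve(&example_pdata);
+ *	}
+ */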
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
new file mode 100644
index 00000000000..dcd2a0cdb19
--- /dev/null
+++ b/drivers/staging/android/ion/ion.h
@@ -0,0 +1,204 @@
+/*
+ * drivers/staging/android/ion/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/types.h>
+
+#include "../uapi/ion.h"
+
+struct ion_handle;
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/* This should be removed some day when phys_addr_t's are fully
+   plumbed in the kernel, and all instances of ion_phys_addr_t should
+   be converted to phys_addr_t. For the time being many kernel interfaces
+   do not yet accept phys_addr_t where they would need to. */
+#define ion_phys_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type: type of the heap from ion_heap_type enum
+ * @id:		unique identifier for the heap. When allocating, heaps
+ *		with higher ids are tried first. At allocation time ids
+ *		are passed as a bit mask and therefore cannot exceed
+ *		ION_NUM_HEAP_IDS.
+ * @name: used for debug purposes
+ * @base: base address of heap in physical memory if applicable
+ * @size: size of the heap in bytes if applicable
+ * @align: required alignment in physical memory if applicable
+ * @priv: private info passed from the board file
+ *
+ * Provided by the board file.
+ */
+struct ion_platform_heap {
+ enum ion_heap_type type;
+ unsigned int id;
+ const char *name;
+ ion_phys_addr_t base;
+ size_t size;
+ ion_phys_addr_t align;
+ void *priv;
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr: number of structures in the array
+ * @heaps:	array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+ int nr;
+ struct ion_platform_heap *heaps;
+};
+
+/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data: platform data specifying starting physical address and
+ * size
+ *
+ * Calls memblock_reserve() to set aside memory for heaps that are
+ * located at specific physical addresses, or are of specific sizes,
+ * and are not managed by the kernel page allocator
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
+ * ion_client_create() - allocates a client and returns it
+ * @dev:		the global ion device
+ * @name:		used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+ const char *name);
+
+/**
+ * ion_client_destroy() - frees a client and all its handles
+ * @client:	the client
+ *
+ * Free the provided client and all of its resources, including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client: the client
+ * @len: size of the allocation
+ * @align: requested allocation alignment, lots of hardware blocks
+ * have alignment requirements of some kind
+ * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set
+ * heaps will be tried in order from highest to lowest
+ * id
+ * @flags: heap flags, the low 16 bits are consumed by ion, the
+ * high 16 bits are passed on to the respective heap and
+ *			are heap-specific
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int heap_id_mask,
+ unsigned int flags);
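+
+/*
+ * Illustrative call sequence (a sketch; "idev", the client name and
+ * the heap id are assumptions): allocate 1 MiB, page aligned, from the
+ * heap with id 0, requesting a cached mapping.
+ *
+ *	struct ion_client *client = ion_client_create(idev, "example");
+ *	struct ion_handle *handle;
+ *
+ *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE, 1 << 0,
+ *			   ION_FLAG_CACHED);
+ *	if (IS_ERR(handle))
+ *		return PTR_ERR(handle);
+ */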
+
+/**
+ * ion_free - free a handle
+ * @client: the client
+ * @handle: the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client: the client
+ * @handle: the handle
+ * @addr: a pointer to put the address in
+ * @len: a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address. Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this API should not be implemented -- ion_sg_table() should be used
+ * instead. Returns -EINVAL if the handle is invalid. This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len);
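+
+/*
+ * Illustrative query (a sketch; only meaningful for physically
+ * contiguous heaps such as the carveout heap):
+ *
+ *	ion_phys_addr_t paddr;
+ *	size_t len;
+ *
+ *	if (!ion_phys(client, handle, &paddr, &len))
+ *		pr_info("buffer at %lx, %zu bytes\n", paddr, len);
+ */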
+
+/**
+ * ion_sg_table - return an sg_table describing a handle
+ * @client: the client
+ * @handle: the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client: the client
+ * @handle: handle to map
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access the buffer's memory.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client: the client
+ * @handle: handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
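+
+/*
+ * Illustrative kernel-mapping round trip (a sketch; assumes a valid
+ * client and handle, and that "buffer_len" is the allocation size):
+ *
+ *	void *vaddr = ion_map_kernel(client, handle);
+ *
+ *	if (!IS_ERR_OR_NULL(vaddr)) {
+ *		memset(vaddr, 0, buffer_len);
+ *		ion_unmap_kernel(client, handle);
+ *	}
+ */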
+
+/**
+ * ion_share_dma_buf() - share buffer as dma-buf
+ * @client: the client
+ * @handle: the handle
+ */
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
+ * @client: the client
+ * @handle: the handle
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle
+ * @client: the client
+ * @fd: the dma-buf fd
+ *
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf(),
+ * import that fd and return a handle representing it. If a dma-buf from
+ * another exporter is passed in, this function will return ERR_PTR(-EINVAL).
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
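+
+/*
+ * Illustrative share/import round trip between two clients (a sketch;
+ * "client_a"/"client_b" are assumptions): both resulting handles
+ * reference the same underlying ion_buffer.
+ *
+ *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
+ *	struct ion_handle *imported = ion_import_dma_buf(client_b, fd);
+ */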
+
+#endif /* _LINUX_ION_H */
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
new file mode 100644
index 00000000000..5165de2ce34
--- /dev/null
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -0,0 +1,194 @@
+/*
+ * drivers/gpu/ion/ion_carveout_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_carveout_heap {
+ struct ion_heap heap;
+ struct gen_pool *pool;
+ ion_phys_addr_t base;
+};
+
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+ unsigned long size,
+ unsigned long align)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+ unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
+
+ if (!offset)
+ return ION_CARVEOUT_ALLOCATE_FAIL;
+
+ return offset;
+}
+
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+
+ if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+ return;
+ gen_pool_free(carveout_heap->pool, addr, size);
+}
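+
+/*
+ * Illustrative use from an architecture-specific custom heap (a
+ * sketch): carve 64 KiB out of the pool and hand it back when done.
+ *
+ *	ion_phys_addr_t addr = ion_carveout_allocate(heap, SZ_64K, 0);
+ *
+ *	if (addr != ION_CARVEOUT_ALLOCATE_FAIL)
+ *		ion_carveout_free(heap, addr, SZ_64K);
+ */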
+
+static int ion_carveout_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ *addr = paddr;
+ *len = buffer->size;
+ return 0;
+}
+
+static int ion_carveout_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct sg_table *table;
+ ion_phys_addr_t paddr;
+ int ret;
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto err_free;
+
+ paddr = ion_carveout_allocate(heap, size, align);
+ if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
+ ret = -ENOMEM;
+ goto err_free_table;
+ }
+
+ sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
+ buffer->priv_virt = table;
+
+ return 0;
+
+err_free_table:
+ sg_free_table(table);
+err_free:
+ kfree(table);
+ return ret;
+}
+
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ ion_heap_buffer_zero(buffer);
+
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
+
+ ion_carveout_free(heap, paddr, buffer->size);
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops carveout_heap_ops = {
+ .allocate = ion_carveout_heap_allocate,
+ .free = ion_carveout_heap_free,
+ .phys = ion_carveout_heap_phys,
+ .map_dma = ion_carveout_heap_map_dma,
+ .unmap_dma = ion_carveout_heap_unmap_dma,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_carveout_heap *carveout_heap;
+ int ret;
+
+ struct page *page;
+ size_t size;
+
+ page = pfn_to_page(PFN_DOWN(heap_data->base));
+ size = heap_data->size;
+
+ ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+ ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+ if (ret)
+ return ERR_PTR(ret);
+
+ carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
+ if (!carveout_heap)
+ return ERR_PTR(-ENOMEM);
+
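+	/* order-12 minimum allocation unit gives 4 KiB pool granularity */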
+ carveout_heap->pool = gen_pool_create(12, -1);
+ if (!carveout_heap->pool) {
+ kfree(carveout_heap);
+ return ERR_PTR(-ENOMEM);
+ }
+ carveout_heap->base = heap_data->base;
+ gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
+ -1);
+ carveout_heap->heap.ops = &carveout_heap_ops;
+ carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+ carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+
+ return &carveout_heap->heap;
+}
+
+void ion_carveout_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+
+ gen_pool_destroy(carveout_heap->pool);
+ kfree(carveout_heap);
+}
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
new file mode 100644
index 00000000000..ca20d627960
--- /dev/null
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/gpu/ion/ion_chunk_heap.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_chunk_heap {
+ struct ion_heap heap;
+ struct gen_pool *pool;
+ ion_phys_addr_t base;
+ unsigned long chunk_size;
+ unsigned long size;
+ unsigned long allocated;
+};
+
+static int ion_chunk_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret, i;
+ unsigned long num_chunks;
+ unsigned long allocated_size;
+
+ if (align > chunk_heap->chunk_size)
+ return -EINVAL;
+
+ allocated_size = ALIGN(size, chunk_heap->chunk_size);
+ num_chunks = allocated_size / chunk_heap->chunk_size;
+
+ if (allocated_size > chunk_heap->size - chunk_heap->allocated)
+ return -ENOMEM;
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+ ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
+ if (ret) {
+ kfree(table);
+ return ret;
+ }
+
+ sg = table->sgl;
+ for (i = 0; i < num_chunks; i++) {
+ unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
+ chunk_heap->chunk_size);
+ if (!paddr)
+ goto err;
+ sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
+ chunk_heap->chunk_size, 0);
+ sg = sg_next(sg);
+ }
+
+ buffer->priv_virt = table;
+ chunk_heap->allocated += allocated_size;
+ return 0;
+err:
+ sg = table->sgl;
+ for (i -= 1; i >= 0; i--) {
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+ sg->length);
+ sg = sg_next(sg);
+ }
+ sg_free_table(table);
+ kfree(table);
+ return -ENOMEM;
+}
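+
+/*
+ * Worked example of the rounding above, assuming a 16 KiB chunk_size:
+ * a 40 KiB request rounds up to allocated_size = 48 KiB, so
+ * num_chunks = 3 and the buffer is described by a three-entry
+ * scatterlist, one chunk per entry.
+ */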
+
+static void ion_chunk_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+ struct sg_table *table = buffer->priv_virt;
+ struct scatterlist *sg;
+ int i;
+ unsigned long allocated_size;
+
+ allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
+
+ ion_heap_buffer_zero(buffer);
+
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+ sg->length);
+ }
+ chunk_heap->allocated -= allocated_size;
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops chunk_heap_ops = {
+ .allocate = ion_chunk_heap_allocate,
+ .free = ion_chunk_heap_free,
+ .map_dma = ion_chunk_heap_map_dma,
+ .unmap_dma = ion_chunk_heap_unmap_dma,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_chunk_heap *chunk_heap;
+ int ret;
+ struct page *page;
+ size_t size;
+
+ page = pfn_to_page(PFN_DOWN(heap_data->base));
+ size = heap_data->size;
+
+ ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+ ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+ if (ret)
+ return ERR_PTR(ret);
+
+ chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
+ if (!chunk_heap)
+ return ERR_PTR(-ENOMEM);
+
+ chunk_heap->chunk_size = (unsigned long)heap_data->priv;
+ chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
+ PAGE_SHIFT, -1);
+ if (!chunk_heap->pool) {
+ ret = -ENOMEM;
+ goto error_gen_pool_create;
+ }
+ chunk_heap->base = heap_data->base;
+ chunk_heap->size = heap_data->size;
+ chunk_heap->allocated = 0;
+
+ gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
+ chunk_heap->heap.ops = &chunk_heap_ops;
+ chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
+ chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+ pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
+ heap_data->size, heap_data->align);
+
+ return &chunk_heap->heap;
+
+error_gen_pool_create:
+ kfree(chunk_heap);
+ return ERR_PTR(ret);
+}
+
+void ion_chunk_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+
+ gen_pool_destroy(chunk_heap->pool);
+ kfree(chunk_heap);
+}
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
new file mode 100644
index 00000000000..4418bda7647
--- /dev/null
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -0,0 +1,218 @@
+/*
+ * drivers/gpu/ion/ion_cma_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_cma_heap {
+ struct ion_heap heap;
+ struct device *dev;
+};
+
+#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
+
+struct ion_cma_buffer_info {
+ void *cpu_addr;
+ dma_addr_t handle;
+ struct sg_table *table;
+};
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it will avalaible.
+ */
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size)
+{
+ struct page *page = virt_to_page(cpu_addr);
+ int ret;
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (unlikely(ret))
+ return ret;
+
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return 0;
+}
+
+/* ION CMA heap operations functions */
+static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
+ unsigned long len, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info;
+
+ dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ return -EINVAL;
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
+ info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
+ if (!info) {
+ dev_err(dev, "Can't allocate buffer info\n");
+		return -ENOMEM;
+ }
+
+ info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
+ GFP_HIGHUSER | __GFP_ZERO);
+
+ if (!info->cpu_addr) {
+ dev_err(dev, "Fail to allocate buffer\n");
+ goto err;
+ }
+
+ info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!info->table) {
+ dev_err(dev, "Fail to allocate sg table\n");
+ goto free_mem;
+ }
+
+	if (ion_cma_get_sgtable(dev, info->table, info->cpu_addr,
+				info->handle, len))
+ goto free_table;
+ /* keep this for memory release */
+ buffer->priv_virt = info;
+ dev_dbg(dev, "Allocate buffer %p\n", buffer);
+ return 0;
+
+free_table:
+ kfree(info->table);
+free_mem:
+ dma_free_coherent(dev, len, info->cpu_addr, info->handle);
+err:
+ kfree(info);
+	return -ENOMEM;
+}
+
+static void ion_cma_free(struct ion_buffer *buffer)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ dev_dbg(dev, "Release buffer %p\n", buffer);
+ /* release memory */
+ dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+ /* release sg table */
+ sg_free_table(info->table);
+ kfree(info->table);
+ kfree(info);
+}
+
+/* return physical address in addr */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+ &info->handle);
+
+ *addr = info->handle;
+ *len = buffer->size;
+
+ return 0;
+}
+
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ return info->table;
+}
+
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
+ buffer->size);
+}
+
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+ /* kernel memory mapping has been done at allocation time */
+ return info->cpu_addr;
+}
+
+static void ion_cma_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops ion_cma_ops = {
+ .allocate = ion_cma_allocate,
+ .free = ion_cma_free,
+ .map_dma = ion_cma_heap_map_dma,
+ .unmap_dma = ion_cma_heap_unmap_dma,
+ .phys = ion_cma_phys,
+ .map_user = ion_cma_mmap,
+ .map_kernel = ion_cma_map_kernel,
+ .unmap_kernel = ion_cma_unmap_kernel,
+};
+
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+{
+ struct ion_cma_heap *cma_heap;
+
+ cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
+
+ if (!cma_heap)
+ return ERR_PTR(-ENOMEM);
+
+ cma_heap->heap.ops = &ion_cma_ops;
+	/*
+	 * get the device from the private heap data; later it will be
+	 * used to make the link with the reserved CMA memory
+	 */
+ cma_heap->dev = data->priv;
+ cma_heap->heap.type = ION_HEAP_TYPE_DMA;
+ return &cma_heap->heap;
+}
+
+void ion_cma_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+
+ kfree(cma_heap);
+}
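+
+/*
+ * Illustrative platform wiring (a sketch; "example_dev" is an
+ * assumption): the CMA heap expects the struct device that owns the
+ * reserved CMA region in the platform heap's priv field.
+ *
+ *	struct ion_platform_heap cma_heap_data = {
+ *		.id   = 1,
+ *		.type = ION_HEAP_TYPE_DMA,
+ *		.name = "cma",
+ *		.priv = &example_dev,
+ *	};
+ */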
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
new file mode 100644
index 00000000000..55b2002753f
--- /dev/null
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -0,0 +1,158 @@
+/*
+ * drivers/gpu/ion/ion_dummy_driver.c
+ *
+ * Copyright (C) 2013 Linaro, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/sizes.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+static struct ion_device *idev;
+static struct ion_heap **heaps;
+
+static void *carveout_ptr;
+static void *chunk_ptr;
+
+static struct ion_platform_heap dummy_heaps[] = {
+ {
+ .id = ION_HEAP_TYPE_SYSTEM,
+ .type = ION_HEAP_TYPE_SYSTEM,
+ .name = "system",
+ },
+ {
+ .id = ION_HEAP_TYPE_SYSTEM_CONTIG,
+ .type = ION_HEAP_TYPE_SYSTEM_CONTIG,
+ .name = "system contig",
+ },
+ {
+ .id = ION_HEAP_TYPE_CARVEOUT,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = "carveout",
+ .size = SZ_4M,
+ },
+ {
+ .id = ION_HEAP_TYPE_CHUNK,
+ .type = ION_HEAP_TYPE_CHUNK,
+ .name = "chunk",
+ .size = SZ_4M,
+ .align = SZ_16K,
+ .priv = (void *)(SZ_16K),
+ },
+};
+
+static struct ion_platform_data dummy_ion_pdata = {
+	.nr = ARRAY_SIZE(dummy_heaps),
+ .heaps = dummy_heaps,
+};
+
+static int __init ion_dummy_init(void)
+{
+ int i, err;
+
+	idev = ion_device_create(NULL);
+	if (IS_ERR(idev))
+		return PTR_ERR(idev);
+
+	heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
+			GFP_KERNEL);
+	if (!heaps)
+		return -ENOMEM;
+
+
+ /* Allocate a dummy carveout heap */
+ carveout_ptr = alloc_pages_exact(
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
+ GFP_KERNEL);
+ if (carveout_ptr)
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
+ virt_to_phys(carveout_ptr);
+ else
+ pr_err("ion_dummy: Could not allocate carveout\n");
+
+ /* Allocate a dummy chunk heap */
+ chunk_ptr = alloc_pages_exact(
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
+ GFP_KERNEL);
+ if (chunk_ptr)
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
+ else
+ pr_err("ion_dummy: Could not allocate chunk\n");
+
+ for (i = 0; i < dummy_ion_pdata.nr; i++) {
+ struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
+
+ if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
+ !heap_data->base)
+ continue;
+
+ if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
+ continue;
+
+ heaps[i] = ion_heap_create(heap_data);
+ if (IS_ERR_OR_NULL(heaps[i])) {
+ err = PTR_ERR(heaps[i]);
+ goto err;
+ }
+ ion_device_add_heap(idev, heaps[i]);
+ }
+ return 0;
+err:
+ for (i = 0; i < dummy_ion_pdata.nr; i++) {
+ if (heaps[i])
+ ion_heap_destroy(heaps[i]);
+ }
+ kfree(heaps);
+
+ if (carveout_ptr) {
+ free_pages_exact(carveout_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+ carveout_ptr = NULL;
+ }
+ if (chunk_ptr) {
+ free_pages_exact(chunk_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+ chunk_ptr = NULL;
+ }
+ return err;
+}
+
+static void __exit ion_dummy_exit(void)
+{
+ int i;
+
+ ion_device_destroy(idev);
+
+ for (i = 0; i < dummy_ion_pdata.nr; i++)
+ ion_heap_destroy(heaps[i]);
+ kfree(heaps);
+
+ if (carveout_ptr) {
+ free_pages_exact(carveout_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+ carveout_ptr = NULL;
+ }
+ if (chunk_ptr) {
+ free_pages_exact(chunk_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+ chunk_ptr = NULL;
+ }
+}
+
+module_init(ion_dummy_init);
+module_exit(ion_dummy_exit);
+
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
new file mode 100644
index 00000000000..750b76af0cf
--- /dev/null
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -0,0 +1,369 @@
+/*
+ * drivers/gpu/ion/ion_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+void *ion_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct scatterlist *sg;
+ int i, j;
+ void *vaddr;
+ pgprot_t pgprot;
+ struct sg_table *table = buffer->sg_table;
+ int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct page **pages = vmalloc(sizeof(struct page *) * npages);
+ struct page **tmp = pages;
+
+ if (!pages)
+ return NULL;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
+ struct page *page = sg_page(sg);
+ BUG_ON(i >= npages);
+ for (j = 0; j < npages_this_entry; j++)
+ *(tmp++) = page++;
+ }
+ vaddr = vmap(pages, npages, VM_MAP, pgprot);
+ vfree(pages);
+
+ if (vaddr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+void ion_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ vunmap(buffer->vaddr);
+}
+
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct sg_table *table = buffer->sg_table;
+ unsigned long addr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+ struct scatterlist *sg;
+ int i;
+ int ret;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+ unsigned long remainder = vma->vm_end - addr;
+ unsigned long len = sg->length;
+
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ continue;
+ } else if (offset) {
+ page += offset / PAGE_SIZE;
+ len = sg->length - offset;
+ offset = 0;
+ }
+ len = min(len, remainder);
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ vma->vm_page_prot);
+ if (ret)
+ return ret;
+ addr += len;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ return 0;
+}
+
+static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
+{
+ void *addr = vm_map_ram(pages, num, -1, pgprot);
+ if (!addr)
+ return -ENOMEM;
+ memset(addr, 0, PAGE_SIZE * num);
+ vm_unmap_ram(addr, num);
+
+ return 0;
+}
+
+static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
+ pgprot_t pgprot)
+{
+ int p = 0;
+ int ret = 0;
+ struct sg_page_iter piter;
+ struct page *pages[32];
+
+ for_each_sg_page(sgl, &piter, nents, 0) {
+ pages[p++] = sg_page_iter_page(&piter);
+ if (p == ARRAY_SIZE(pages)) {
+ ret = ion_heap_clear_pages(pages, p, pgprot);
+ if (ret)
+ return ret;
+ p = 0;
+ }
+ }
+ if (p)
+ ret = ion_heap_clear_pages(pages, p, pgprot);
+
+ return ret;
+}
+
+int ion_heap_buffer_zero(struct ion_buffer *buffer)
+{
+ struct sg_table *table = buffer->sg_table;
+ pgprot_t pgprot;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
+}
+
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, size, 0);
+ return ion_heap_sglist_zero(&sg, 1, pgprot);
+}
+
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
+{
+ spin_lock(&heap->free_lock);
+ list_add(&buffer->list, &heap->free_list);
+ heap->free_list_size += buffer->size;
+ spin_unlock(&heap->free_lock);
+ wake_up(&heap->waitqueue);
+}
+
+size_t ion_heap_freelist_size(struct ion_heap *heap)
+{
+ size_t size;
+
+ spin_lock(&heap->free_lock);
+ size = heap->free_list_size;
+ spin_unlock(&heap->free_lock);
+
+ return size;
+}
+
+static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
+ bool skip_pools)
+{
+ struct ion_buffer *buffer;
+ size_t total_drained = 0;
+
+ if (ion_heap_freelist_size(heap) == 0)
+ return 0;
+
+ spin_lock(&heap->free_lock);
+ if (size == 0)
+ size = heap->free_list_size;
+
+ while (!list_empty(&heap->free_list)) {
+ if (total_drained >= size)
+ break;
+ buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+ list);
+ list_del(&buffer->list);
+ heap->free_list_size -= buffer->size;
+ if (skip_pools)
+ buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
+ total_drained += buffer->size;
+ spin_unlock(&heap->free_lock);
+ ion_buffer_destroy(buffer);
+ spin_lock(&heap->free_lock);
+ }
+ spin_unlock(&heap->free_lock);
+
+ return total_drained;
+}
+
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+ return _ion_heap_freelist_drain(heap, size, false);
+}
+
+size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
+{
+ return _ion_heap_freelist_drain(heap, size, true);
+}
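+
+/*
+ * Worked example (a sketch): with three 1 MiB buffers on the freelist,
+ * ion_heap_freelist_drain(heap, SZ_2M) destroys two of them and
+ * returns SZ_2M, while a size of 0 drains the entire list.
+ */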
+
+static int ion_heap_deferred_free(void *data)
+{
+ struct ion_heap *heap = data;
+
+ while (true) {
+ struct ion_buffer *buffer;
+
+ wait_event_freezable(heap->waitqueue,
+ ion_heap_freelist_size(heap) > 0);
+
+ spin_lock(&heap->free_lock);
+ if (list_empty(&heap->free_list)) {
+ spin_unlock(&heap->free_lock);
+ continue;
+ }
+ buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+ list);
+ list_del(&buffer->list);
+ heap->free_list_size -= buffer->size;
+ spin_unlock(&heap->free_lock);
+ ion_buffer_destroy(buffer);
+ }
+
+ return 0;
+}
+
+int ion_heap_init_deferred_free(struct ion_heap *heap)
+{
+ struct sched_param param = { .sched_priority = 0 };
+
+ INIT_LIST_HEAD(&heap->free_list);
+ heap->free_list_size = 0;
+ spin_lock_init(&heap->free_lock);
+ init_waitqueue_head(&heap->waitqueue);
+	heap->task = kthread_run(ion_heap_deferred_free, heap,
+				 "%s", heap->name);
+	if (IS_ERR(heap->task)) {
+		pr_err("%s: creating thread for deferred free failed\n",
+		       __func__);
+		return PTR_RET(heap->task);
+	}
+	sched_setscheduler(heap->task, SCHED_IDLE, &param);
+	return 0;
+}
+
+static int ion_heap_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+ shrinker);
+ int total = 0;
+ int freed = 0;
+ int to_scan = sc->nr_to_scan;
+
+ if (to_scan == 0)
+ goto out;
+
+ /*
+ * shrink the free list first, no point in zeroing the memory if we're
+ * just going to reclaim it. Also, skip any possible page pooling.
+ */
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
+ PAGE_SIZE;
+
+ to_scan -= freed;
+ if (to_scan < 0)
+ to_scan = 0;
+
+out:
+ total = ion_heap_freelist_size(heap) / PAGE_SIZE;
+ if (heap->ops->shrink)
+ total += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
+ return total;
+}
+
+void ion_heap_init_shrinker(struct ion_heap *heap)
+{
+ heap->shrinker.shrink = ion_heap_shrink;
+ heap->shrinker.seeks = DEFAULT_SEEKS;
+ heap->shrinker.batch = 0;
+ register_shrinker(&heap->shrinker);
+}
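+
+/*
+ * Illustrative heap setup (a sketch; "example_heap_ops" is an
+ * assumption): a heap opts into deferred freeing and shrinking by
+ * setting the flag and/or providing a shrink op before registration;
+ * ion_device_add_heap() then calls the two init functions above.
+ *
+ *	heap->ops = &example_heap_ops;	(the shrink op is optional)
+ *	heap->flags |= ION_HEAP_FLAG_DEFER_FREE;
+ *	ion_device_add_heap(dev, heap);
+ */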
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_heap *heap = NULL;
+
+ switch (heap_data->type) {
+ case ION_HEAP_TYPE_SYSTEM_CONTIG:
+ heap = ion_system_contig_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_SYSTEM:
+ heap = ion_system_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_CARVEOUT:
+ heap = ion_carveout_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_CHUNK:
+ heap = ion_chunk_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_DMA:
+ heap = ion_cma_heap_create(heap_data);
+ break;
+ default:
+ pr_err("%s: Invalid heap type %d\n", __func__,
+ heap_data->type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (IS_ERR_OR_NULL(heap)) {
+ pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
+ __func__, heap_data->name, heap_data->type,
+ heap_data->base, heap_data->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ heap->name = heap_data->name;
+ heap->id = heap_data->id;
+ return heap;
+}
+
+void ion_heap_destroy(struct ion_heap *heap)
+{
+ if (!heap)
+ return;
+
+ switch (heap->type) {
+ case ION_HEAP_TYPE_SYSTEM_CONTIG:
+ ion_system_contig_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_SYSTEM:
+ ion_system_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_CARVEOUT:
+ ion_carveout_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_CHUNK:
+ ion_chunk_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_DMA:
+ ion_cma_heap_destroy(heap);
+ break;
+ default:
+ pr_err("%s: Invalid heap type %d\n", __func__,
+ heap->type);
+ }
+}
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
new file mode 100644
index 00000000000..0e20e62ec4b
--- /dev/null
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -0,0 +1,190 @@
+/*
+ * drivers/gpu/ion/ion_mem_pool.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "ion_priv.h"
+
+struct ion_page_pool_item {
+ struct page *page;
+ struct list_head list;
+};
+
+static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+{
+ struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+
+ if (!page)
+ return NULL;
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+ DMA_BIDIRECTIONAL);
+ return page;
+}
+
+static void ion_page_pool_free_pages(struct ion_page_pool *pool,
+ struct page *page)
+{
+ __free_pages(page, pool->order);
+}
+
+static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
+{
+ struct ion_page_pool_item *item;
+
+ item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ mutex_lock(&pool->mutex);
+ item->page = page;
+ if (PageHighMem(page)) {
+ list_add_tail(&item->list, &pool->high_items);
+ pool->high_count++;
+ } else {
+ list_add_tail(&item->list, &pool->low_items);
+ pool->low_count++;
+ }
+ mutex_unlock(&pool->mutex);
+ return 0;
+}
+
+static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
+{
+ struct ion_page_pool_item *item;
+ struct page *page;
+
+ if (high) {
+ BUG_ON(!pool->high_count);
+ item = list_first_entry(&pool->high_items,
+ struct ion_page_pool_item, list);
+ pool->high_count--;
+ } else {
+ BUG_ON(!pool->low_count);
+ item = list_first_entry(&pool->low_items,
+ struct ion_page_pool_item, list);
+ pool->low_count--;
+ }
+
+ list_del(&item->list);
+ page = item->page;
+ kfree(item);
+ return page;
+}
+
+void *ion_page_pool_alloc(struct ion_page_pool *pool)
+{
+ struct page *page = NULL;
+
+ BUG_ON(!pool);
+
+ mutex_lock(&pool->mutex);
+ if (pool->high_count)
+ page = ion_page_pool_remove(pool, true);
+ else if (pool->low_count)
+ page = ion_page_pool_remove(pool, false);
+ mutex_unlock(&pool->mutex);
+
+ if (!page)
+ page = ion_page_pool_alloc_pages(pool);
+
+ return page;
+}
+
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
+{
+ int ret;
+
+ ret = ion_page_pool_add(pool, page);
+ if (ret)
+ ion_page_pool_free_pages(pool, page);
+}
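+
+/*
+ * Illustrative pool usage from a heap (a sketch): order-4 blocks are
+ * drawn from, and returned to, a dedicated pool; freed pages are
+ * cached for reuse rather than handed straight back to the page
+ * allocator.
+ *
+ *	struct ion_page_pool *pool;
+ *	struct page *page;
+ *
+ *	pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 4);
+ *	page = ion_page_pool_alloc(pool);
+ *	ion_page_pool_free(pool, page);
+ */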
+
+static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
+{
+	int count = pool->low_count;
+
+	if (high)
+		count += pool->high_count;
+
+	return count << pool->order;
+}
+
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan)
+{
+ int i;
+ bool high;
+
+ high = !!(gfp_mask & __GFP_HIGHMEM);
+
+ for (i = 0; i < nr_to_scan; i++) {
+ struct page *page;
+
+ mutex_lock(&pool->mutex);
+ if (pool->low_count) {
+ page = ion_page_pool_remove(pool, false);
+ } else if (high && pool->high_count) {
+ page = ion_page_pool_remove(pool, true);
+ } else {
+ mutex_unlock(&pool->mutex);
+ break;
+ }
+ mutex_unlock(&pool->mutex);
+ ion_page_pool_free_pages(pool, page);
+ }
+
+ return ion_page_pool_total(pool, high);
+}
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+{
+ struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
+ GFP_KERNEL);
+ if (!pool)
+ return NULL;
+ pool->high_count = 0;
+ pool->low_count = 0;
+ INIT_LIST_HEAD(&pool->low_items);
+ INIT_LIST_HEAD(&pool->high_items);
+ pool->gfp_mask = gfp_mask;
+ pool->order = order;
+ mutex_init(&pool->mutex);
+ plist_node_init(&pool->list, order);
+
+ return pool;
+}
+
+void ion_page_pool_destroy(struct ion_page_pool *pool)
+{
+ kfree(pool);
+}
+
+static int __init ion_page_pool_init(void)
+{
+ return 0;
+}
+
+static void __exit ion_page_pool_exit(void)
+{
+}
+
+module_init(ion_page_pool_init);
+module_exit(ion_page_pool_exit);
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
new file mode 100644
index 00000000000..9bcd077f251
--- /dev/null
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -0,0 +1,405 @@
+/*
+ * drivers/gpu/ion/ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_PRIV_H
+#define _ION_PRIV_H
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
+
+#include "ion.h"
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref:		reference count
+ * @node: node in the ion_device buffers tree
+ * @dev: back pointer to the ion_device
+ * @heap: back pointer to the heap the buffer came from
+ * @flags: buffer specific flags
+ * @private_flags: internal buffer specific flags
+ * @size: size of the buffer
+ * @priv_virt: private data to the buffer representable as
+ * a void *
+ * @priv_phys: private data to the buffer representable as
+ * an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock: protects the buffers cnt fields
+ * @kmap_cnt: number of times the buffer is mapped to the kernel
+ * @vaddr:		the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt: number of times the buffer is mapped for dma
+ * @sg_table: the sg table for the buffer if dmap_cnt is not zero
+ * @pages: flat array of pages in the buffer -- used by fault
+ * handler and only valid for buffers that are faulted in
+ * @vmas: list of vma's mapping this buffer
+ * @handle_count: count of handles referencing this buffer
+ * @task_comm: taskcomm of last client to reference this buffer in a
+ * handle, used for debugging
+ * @pid: pid of last client to reference this buffer in a
+ * handle, used for debugging
+ */
+struct ion_buffer {
+ struct kref ref;
+ union {
+ struct rb_node node;
+ struct list_head list;
+ };
+ struct ion_device *dev;
+ struct ion_heap *heap;
+ unsigned long flags;
+ unsigned long private_flags;
+ size_t size;
+ union {
+ void *priv_virt;
+ ion_phys_addr_t priv_phys;
+ };
+ struct mutex lock;
+ int kmap_cnt;
+ void *vaddr;
+ int dmap_cnt;
+ struct sg_table *sg_table;
+ struct page **pages;
+ struct list_head vmas;
+ /* used to track orphaned buffers */
+ int handle_count;
+ char task_comm[TASK_COMM_LEN];
+ pid_t pid;
+};
+void ion_buffer_destroy(struct ion_buffer *buffer);
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate: allocate memory
+ * @free: free memory
+ * @phys:		get physical address of a buffer (only defined on
+ *			physically contiguous heaps)
+ * @map_dma:		map the memory for dma to a scatterlist
+ * @unmap_dma:		unmap the memory for dma
+ * @map_kernel:		map memory to the kernel
+ * @unmap_kernel:	unmap memory from the kernel
+ * @map_user:		map memory to userspace
+ * @shrink:		shrink the heap, freeing up to nr_to_scan pages
+ *
+ * allocate, phys, and map_user return 0 on success, -errno on error.
+ * map_dma and map_kernel return a pointer on success, ERR_PTR on
+ * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
+ * case, the pages being freed must be truly freed back to the
+ * system, not put in a page pool or otherwise cached.
+ */
+struct ion_heap_ops {
+ int (*allocate) (struct ion_heap *heap,
+ struct ion_buffer *buffer, unsigned long len,
+ unsigned long align, unsigned long flags);
+ void (*free) (struct ion_buffer *buffer);
+ int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len);
+ struct sg_table *(*map_dma) (struct ion_heap *heap,
+ struct ion_buffer *buffer);
+ void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
+ void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+ void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+ int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma);
+ int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
+};
+
+/**
+ * heap flags - flags between the heaps and core ion code
+ */
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
+
+/**
+ * private flags - flags internal to ion
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * heap-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node: rb node to put the heap on the device's tree of heaps
+ * @dev: back pointer to the ion_device
+ * @type: type of heap
+ * @ops: ops struct as above
+ * @flags: flags
+ * @id: id of heap, also indicates priority of this heap when
+ * allocating. These are specified by platform data and
+ * MUST be unique
+ * @name: used for debugging
+ * @shrinker: a shrinker for the heap
+ * @free_list: free list head if deferred free is used
+ * @free_list_size:	size of the deferred free list in bytes
+ * @free_lock:		protects the free list
+ * @waitqueue: queue to wait on from deferred free thread
+ * @task: task struct of deferred free thread
+ * @debug_show: called when heap debug file is read to add any
+ * heap specific debug info to output
+ *
+ * Represents a pool of memory from which buffers can be made. In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
+struct ion_heap {
+ struct plist_node node;
+ struct ion_device *dev;
+ enum ion_heap_type type;
+ struct ion_heap_ops *ops;
+ unsigned long flags;
+ unsigned int id;
+ const char *name;
+ struct shrinker shrinker;
+ struct list_head free_list;
+ size_t free_list_size;
+ spinlock_t free_lock;
+ wait_queue_head_t waitqueue;
+ struct task_struct *task;
+ int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+};
+
+/**
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer: buffer
+ *
+ * indicates whether this ion buffer is cached
+ */
+bool ion_buffer_cached(struct ion_buffer *buffer);
+
+/**
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer: buffer
+ *
+ * indicates whether userspace mappings of this buffer will be faulted
+ * in; this can affect how buffers are allocated from the heap.
+ */
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
+
+/**
+ * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl: arch specific ioctl function if applicable
+ *
+ * returns a valid device or ERR_PTR(-errno)
+ */
+struct ion_device *ion_device_create(long (*custom_ioctl)
+ (struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg));
+
+/**
+ * ion_device_destroy - frees a device and its resources
+ * @dev: the device
+ */
+void ion_device_destroy(struct ion_device *dev);
+
+/**
+ * ion_device_add_heap - adds a heap to the ion device
+ * @dev: the device
+ * @heap: the heap to add
+ */
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+
+/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
+void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
+int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
+ struct vm_area_struct *);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+
+/**
+ * ion_heap_init_shrinker
+ * @heap: the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
+ */
+void ion_heap_init_shrinker(struct ion_heap *heap);
+
+/**
+ * ion_heap_init_deferred_free -- initialize deferred free functionality
+ * @heap: the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
+ * be called to setup deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later
+ */
+int ion_heap_init_deferred_free(struct ion_heap *heap);
+
+/**
+ * ion_heap_freelist_add - add a buffer to the deferred free list
+ * @heap: the heap
+ * @buffer: the buffer
+ *
+ * Adds an item to the deferred freelist.
+ */
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
+
+/**
+ * ion_heap_freelist_drain - drain the deferred free list
+ * @heap: the heap
+ * @size:	amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ */
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
+
+/**
+ * ion_heap_freelist_shrink - drain the deferred free
+ * list, skipping any heap-specific
+ * pooling or caching mechanisms
+ *
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ *
+ * Unlike @ion_heap_freelist_drain, this call does not put any pages
+ * back into page pools or otherwise cache them. Everything must be
+ * genuinely freed back to the system. If you're freeing from a
+ * shrinker you probably want to use this. Note that this relies on
+ * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
+ * flag.
+ */
+size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+ size_t size);
+
+/**
+ * ion_heap_freelist_size - returns the size of the freelist in bytes
+ * @heap: the heap
+ */
+size_t ion_heap_freelist_size(struct ion_heap *heap);
+
+
+/**
+ * functions for creating and destroying the built in ion heaps.
+ * architectures can add their own custom architecture specific
+ * heaps as appropriate.
+ */
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *);
+void ion_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
+void ion_system_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
+void ion_system_contig_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
+void ion_carveout_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
+void ion_chunk_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
+void ion_cma_heap_destroy(struct ion_heap *);
+
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+ unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size);
+/**
+ * The carveout heap returns physical addresses; since 0 may be a valid
+ * physical address, this value is used to indicate that allocation failed
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre-allocated memory to use from your heap. Keeping
+ * a pool of memory that is ready for dma, i.e. any cached mappings have
+ * been invalidated from the cache, provides a significant performance
+ * benefit on many systems
+ */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count: number of highmem items in the pool
+ * @low_count: number of lowmem items in the pool
+ * @high_items: list of highmem items
+ * @low_items: list of lowmem items
+ * @mutex: lock protecting this struct and especially the count
+ * item list
+ * @gfp_mask: gfp_mask to use from alloc
+ * @order: order of pages in the pool
+ * @list: plist node for list of pools
+ *
+ * Allows you to keep a pool of pre-allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
+ * have been invalidated from the cache, provides a significant performance
+ * benefit on many systems
+ */
+struct ion_page_pool {
+ int high_count;
+ int low_count;
+ struct list_head high_items;
+ struct list_head low_items;
+ struct mutex mutex;
+ gfp_t gfp_mask;
+ unsigned int order;
+ struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+void ion_page_pool_destroy(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *);
+void ion_page_pool_free(struct ion_page_pool *, struct page *);
+
+/**
+ * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
+ * @pool: the pool
+ * @gfp_mask: the memory type to reclaim
+ * @nr_to_scan: number of items to shrink in pages
+ *
+ * returns the number of items freed in pages
+ */
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan);
+
+/**
+ * ion_pages_sync_for_device - cache flush pages for use with the specified
+ * device
+ * @dev: the device the pages will be used with
+ * @page: the first page to be flushed
+ * @size: size in bytes of region to be flushed
+ * @dir: direction of dma transfer
+ */
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+ size_t size, enum dma_data_direction dir);
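+/*
+ * Illustrative sketch (an assumption, not part of this patch): flushing a
+ * freshly allocated page before handing it to a device for bidirectional DMA.
+ *
+ * ion_pages_sync_for_device(NULL, page, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ */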
+
+#endif /* _ION_PRIV_H */
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
new file mode 100644
index 00000000000..a052418d2ef
--- /dev/null
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -0,0 +1,446 @@
+/*
+ * drivers/gpu/ion/ion_system_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+ __GFP_NORETRY) & ~__GFP_WAIT;
+static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
+static int order_to_index(unsigned int order)
+{
+ int i;
+ for (i = 0; i < num_orders; i++)
+ if (order == orders[i])
+ return i;
+ BUG();
+ return -1;
+}
+
+static unsigned int order_to_size(int order)
+{
+ return PAGE_SIZE << order;
+}
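+
+/*
+ * Worked example (illustrative): with orders[] = {8, 4, 0} and 4K pages,
+ * order_to_size() yields 4K << 8 = 1M, 4K << 4 = 64K and 4K << 0 = 4K.
+ * A 1216K request is therefore satisfied by alloc_largest_available()
+ * below as one 1M chunk plus three 64K chunks.
+ */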
+
+struct ion_system_heap {
+ struct ion_heap heap;
+ struct ion_page_pool **pools;
+};
+
+struct page_info {
+ struct page *page;
+ unsigned int order;
+ struct list_head list;
+};
+
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long order)
+{
+ bool cached = ion_buffer_cached(buffer);
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ struct page *page;
+
+ if (!cached) {
+ page = ion_page_pool_alloc(pool);
+ } else {
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (order > 4)
+ gfp_flags = high_order_gfp_flags;
+ page = alloc_pages(gfp_flags, order);
+ if (!page)
+ return NULL;
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
+ }
+ if (!page)
+ return NULL;
+
+ return page;
+}
+
+static void free_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer, struct page *page,
+ unsigned int order)
+{
+ bool cached = ion_buffer_cached(buffer);
+
+ if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ ion_page_pool_free(pool, page);
+ } else {
+ __free_pages(page, order);
+ }
+}
+
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size,
+ unsigned int max_order)
+{
+ struct page *page;
+ struct page_info *info;
+ int i;
+
+ info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ for (i = 0; i < num_orders; i++) {
+ if (size < order_to_size(orders[i]))
+ continue;
+ if (max_order < orders[i])
+ continue;
+
+ page = alloc_buffer_page(heap, buffer, orders[i]);
+ if (!page)
+ continue;
+
+ info->page = page;
+ info->order = orders[i];
+ INIT_LIST_HEAD(&info->list);
+ return info;
+ }
+ kfree(info);
+
+ return NULL;
+}
+
+static int ion_system_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret;
+ struct list_head pages;
+ struct page_info *info, *tmp_info;
+ int i = 0;
+ unsigned long size_remaining = PAGE_ALIGN(size);
+ unsigned int max_order = orders[0];
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
+ if (size / PAGE_SIZE > totalram_pages / 2)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pages);
+ while (size_remaining > 0) {
+ info = alloc_largest_available(sys_heap, buffer, size_remaining,
+ max_order);
+ if (!info)
+ goto err;
+ list_add_tail(&info->list, &pages);
+ size_remaining -= (1 << info->order) * PAGE_SIZE;
+ max_order = info->order;
+ i++;
+ }
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ goto err;
+
+ ret = sg_alloc_table(table, i, GFP_KERNEL);
+ if (ret)
+ goto err1;
+
+ sg = table->sgl;
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
+ struct page *page = info->page;
+ sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+ sg = sg_next(sg);
+ list_del(&info->list);
+ kfree(info);
+ }
+
+ buffer->priv_virt = table;
+ return 0;
+err1:
+ kfree(table);
+err:
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
+ free_buffer_page(sys_heap, buffer, info->page, info->order);
+ kfree(info);
+ }
+ return -ENOMEM;
+}
+
+static void ion_system_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ struct sg_table *table = buffer->sg_table;
+ bool cached = ion_buffer_cached(buffer);
+ struct scatterlist *sg;
+ LIST_HEAD(pages);
+ int i;
+
+ /* uncached pages come from the page pools, zero them before returning
+ for security purposes (other allocations are zeroed at alloc time) */
+ if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
+ ion_heap_buffer_zero(buffer);
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ free_buffer_page(sys_heap, buffer, sg_page(sg),
+ get_order(sg->length));
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
+static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+ int nr_to_scan)
+{
+ struct ion_system_heap *sys_heap;
+ int nr_total = 0;
+ int i;
+
+ sys_heap = container_of(heap, struct ion_system_heap, heap);
+
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
+ nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+ }
+
+ return nr_total;
+}
+
+static struct ion_heap_ops system_heap_ops = {
+ .allocate = ion_system_heap_allocate,
+ .free = ion_system_heap_free,
+ .map_dma = ion_system_heap_map_dma,
+ .unmap_dma = ion_system_heap_unmap_dma,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
+ .shrink = ion_system_heap_shrink,
+};
+
+static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
+ void *unused)
+{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
+ seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+ pool->high_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->high_count);
+ seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+ pool->low_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->low_count);
+ }
+ return 0;
+}
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+{
+ struct ion_system_heap *heap;
+ int i;
+
+ heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->heap.ops = &system_heap_ops;
+ heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+ heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+ heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
+ GFP_KERNEL);
+ if (!heap->pools)
+ goto err_alloc_pools;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool;
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (orders[i] > 4)
+ gfp_flags = high_order_gfp_flags;
+ pool = ion_page_pool_create(gfp_flags, orders[i]);
+ if (!pool)
+ goto err_create_pool;
+ heap->pools[i] = pool;
+ }
+
+ heap->heap.debug_show = ion_system_heap_debug_show;
+ return &heap->heap;
+err_create_pool:
+ for (i = 0; i < num_orders; i++)
+ if (heap->pools[i])
+ ion_page_pool_destroy(heap->pools[i]);
+ kfree(heap->pools);
+err_alloc_pools:
+ kfree(heap);
+ return ERR_PTR(-ENOMEM);
+}
+
+void ion_system_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+
+ for (i = 0; i < num_orders; i++)
+ ion_page_pool_destroy(sys_heap->pools[i]);
+ kfree(sys_heap->pools);
+ kfree(sys_heap);
+}
+
+static int ion_system_contig_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
+{
+ int order = get_order(len);
+ struct page *page;
+ struct sg_table *table;
+ unsigned long i;
+ int ret;
+
+ if (align > (PAGE_SIZE << order))
+ return -EINVAL;
+
+ page = alloc_pages(low_order_gfp_flags, order);
+ if (!page)
+ return -ENOMEM;
+
+ split_page(page, order);
+
+ len = PAGE_ALIGN(len);
+ for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
+ __free_page(page + i);
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ sg_set_page(table->sgl, page, len, 0);
+
+ buffer->priv_virt = table;
+
+ ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
+
+ return 0;
+
+out:
+ for (i = 0; i < len >> PAGE_SHIFT; i++)
+ __free_page(page + i);
+ kfree(table);
+ return ret;
+}
+
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+ unsigned long i;
+
+ for (i = 0; i < pages; i++)
+ __free_page(page + i);
+ sg_free_table(table);
+ kfree(table);
+}
+
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ *addr = page_to_phys(page);
+ *len = buffer->size;
+ return 0;
+}
+
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops kmalloc_ops = {
+ .allocate = ion_system_contig_heap_allocate,
+ .free = ion_system_contig_heap_free,
+ .phys = ion_system_contig_heap_phys,
+ .map_dma = ion_system_contig_heap_map_dma,
+ .unmap_dma = ion_system_contig_heap_unmap_dma,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
+};
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+{
+ struct ion_heap *heap;
+
+ heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->ops = &kmalloc_ops;
+ heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+ return heap;
+}
+
+void ion_system_contig_heap_destroy(struct ion_heap *heap)
+{
+ kfree(heap);
+}
+
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
new file mode 100644
index 00000000000..654acb5c8eb
--- /dev/null
+++ b/drivers/staging/android/ion/ion_test.c
@@ -0,0 +1,282 @@
+/*
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "ion-test: " fmt
+
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "ion.h"
+#include "../uapi/ion_test.h"
+
+#define u64_to_uptr(x) ((void __user *)(unsigned long)(x))
+
+struct ion_test_device {
+ struct miscdevice misc;
+};
+
+struct ion_test_data {
+ struct dma_buf *dma_buf;
+ struct device *dev;
+};
+
+static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
+ void __user *ptr, size_t offset, size_t size, bool write)
+{
+ int ret = 0;
+ struct dma_buf_attachment *attach;
+ struct sg_table *table;
+ pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
+ enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ struct sg_page_iter sg_iter;
+ unsigned long offset_page;
+
+ attach = dma_buf_attach(dma_buf, dev);
+ if (IS_ERR(attach))
+ return PTR_ERR(attach);
+
+ table = dma_buf_map_attachment(attach, dir);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ offset_page = offset >> PAGE_SHIFT;
+ offset %= PAGE_SIZE;
+
+ for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+ void *vaddr = vmap(&page, 1, VM_MAP, pgprot);
+ size_t to_copy = PAGE_SIZE - offset;
+
+ to_copy = min(to_copy, size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (write)
+ ret = copy_from_user(vaddr + offset, ptr, to_copy);
+ else
+ ret = copy_to_user(ptr, vaddr + offset, to_copy);
+
+ vunmap(vaddr);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+ size -= to_copy;
+ if (!size)
+ break;
+ ptr += to_copy;
+ offset = 0;
+ }
+
+err:
+ dma_buf_unmap_attachment(attach, table, dir);
+ dma_buf_detach(dma_buf, attach);
+ return ret;
+}
+
+static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
+ size_t offset, size_t size, bool write)
+{
+ int ret;
+ unsigned long page_offset = offset >> PAGE_SHIFT;
+ size_t copy_offset = offset % PAGE_SIZE;
+ size_t copy_size = size;
+ enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ if (offset > dma_buf->size || size > dma_buf->size - offset)
+ return -EINVAL;
+
+ ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
+ if (ret)
+ return ret;
+
+ while (copy_size > 0) {
+ size_t to_copy;
+ void *vaddr = dma_buf_kmap(dma_buf, page_offset);
+
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size);
+
+ if (write)
+ ret = copy_from_user(vaddr + copy_offset, ptr, to_copy);
+ else
+ ret = copy_to_user(ptr, vaddr + copy_offset, to_copy);
+
+ dma_buf_kunmap(dma_buf, page_offset, vaddr);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ copy_size -= to_copy;
+ ptr += to_copy;
+ page_offset++;
+ copy_offset = 0;
+ }
+err:
+ dma_buf_end_cpu_access(dma_buf, offset, size, dir);
+ return ret;
+}
+
+static long ion_test_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct ion_test_data *test_data = filp->private_data;
+ int ret = 0;
+
+ union {
+ struct ion_test_rw_data test_rw;
+ } data;
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ION_IOC_TEST_SET_FD:
+ {
+ struct dma_buf *dma_buf = NULL;
+ int fd = arg;
+
+ if (fd >= 0) {
+ dma_buf = dma_buf_get((int)arg);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+ }
+ if (test_data->dma_buf)
+ dma_buf_put(test_data->dma_buf);
+ test_data->dma_buf = dma_buf;
+ break;
+ }
+ case ION_IOC_TEST_DMA_MAPPING:
+ {
+ ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset, data.test_rw.size,
+ data.test_rw.write);
+ break;
+ }
+ case ION_IOC_TEST_KERNEL_MAPPING:
+ {
+ ret = ion_handle_test_kernel(test_data->dma_buf,
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset, data.test_rw.size,
+ data.test_rw.write);
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ if (_IOC_DIR(cmd) & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ return -EFAULT;
+ }
+ return ret;
+}
+
+static int ion_test_open(struct inode *inode, struct file *file)
+{
+ struct ion_test_data *data;
+ struct miscdevice *miscdev = file->private_data;
+
+ data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = miscdev->parent;
+
+ file->private_data = data;
+
+ return 0;
+}
+
+static int ion_test_release(struct inode *inode, struct file *file)
+{
+ struct ion_test_data *data = file->private_data;
+
+ kfree(data);
+
+ return 0;
+}
+
+static const struct file_operations ion_test_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ion_test_ioctl,
+ .compat_ioctl = ion_test_ioctl,
+ .open = ion_test_open,
+ .release = ion_test_release,
+};
+
+static int __init ion_test_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct ion_test_device *testdev;
+
+ testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
+ GFP_KERNEL);
+ if (!testdev)
+ return -ENOMEM;
+
+ testdev->misc.minor = MISC_DYNAMIC_MINOR;
+ testdev->misc.name = "ion-test";
+ testdev->misc.fops = &ion_test_fops;
+ testdev->misc.parent = &pdev->dev;
+ ret = misc_register(&testdev->misc);
+ if (ret) {
+ pr_err("failed to register misc device.\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, testdev);
+
+ return 0;
+}
+
+static struct platform_driver ion_test_platform_driver = {
+ .driver = {
+ .name = "ion-test",
+ },
+};
+
+static int __init ion_test_init(void)
+{
+ platform_device_register_simple("ion-test", -1, NULL, 0);
+ return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
+}
+
+static void __exit ion_test_exit(void)
+{
+ platform_driver_unregister(&ion_test_platform_driver);
+}
+
+module_init(ion_test_init);
+module_exit(ion_test_exit);
diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile
new file mode 100644
index 00000000000..11cd003fb08
--- /dev/null
+++ b/drivers/staging/android/ion/tegra/Makefile
@@ -0,0 +1 @@
+obj-y += tegra_ion.o
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
new file mode 100644
index 00000000000..3474c65f87f
--- /dev/null
+++ b/drivers/staging/android/ion/tegra/tegra_ion.c
@@ -0,0 +1,84 @@
+/*
+ * drivers/gpu/tegra/tegra_ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "../ion.h"
+#include "../ion_priv.h"
+
+static struct ion_device *idev;
+static int num_heaps;
+static struct ion_heap **heaps;
+
+static int tegra_ion_probe(struct platform_device *pdev)
+{
+ struct ion_platform_data *pdata = pdev->dev.platform_data;
+ int err;
+ int i;
+
+ num_heaps = pdata->nr;
+
+ heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+ if (!heaps)
+ return -ENOMEM;
+
+ idev = ion_device_create(NULL);
+ if (IS_ERR_OR_NULL(idev)) {
+ kfree(heaps);
+ return PTR_ERR(idev);
+ }
+
+ /* create the heaps as specified in the board file */
+ for (i = 0; i < num_heaps; i++) {
+ struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+ heaps[i] = ion_heap_create(heap_data);
+ if (IS_ERR_OR_NULL(heaps[i])) {
+ err = PTR_ERR(heaps[i]);
+ goto err;
+ }
+ ion_device_add_heap(idev, heaps[i]);
+ }
+ platform_set_drvdata(pdev, idev);
+ return 0;
+err:
+ for (i = 0; i < num_heaps; i++) {
+ if (heaps[i])
+ ion_heap_destroy(heaps[i]);
+ }
+ kfree(heaps);
+ return err;
+}
+
+static int tegra_ion_remove(struct platform_device *pdev)
+{
+ struct ion_device *idev = platform_get_drvdata(pdev);
+ int i;
+
+ ion_device_destroy(idev);
+ for (i = 0; i < num_heaps; i++)
+ ion_heap_destroy(heaps[i]);
+ kfree(heaps);
+ return 0;
+}
+
+static struct platform_driver ion_driver = {
+ .probe = tegra_ion_probe,
+ .remove = tegra_ion_remove,
+ .driver = { .name = "ion-tegra" }
+};
+
+module_platform_driver(ion_driver);
+
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index fe74494868e..a56e0894f66 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -39,7 +39,6 @@
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/rcupdate.h>
-#include <linux/profile.h>
#include <linux/notifier.h>
static uint32_t lowmem_debug_level = 1;
@@ -74,6 +73,7 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
int tasksize;
int i;
short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
+ int minfree = 0;
int selected_tasksize = 0;
short selected_oom_score_adj;
int array_size = ARRAY_SIZE(lowmem_adj);
@@ -86,8 +86,8 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
if (lowmem_minfree_size < array_size)
array_size = lowmem_minfree_size;
for (i = 0; i < array_size; i++) {
- if (other_free < lowmem_minfree[i] &&
- other_file < lowmem_minfree[i]) {
+ minfree = lowmem_minfree[i];
+ if (other_free < minfree && other_file < minfree) {
min_score_adj = lowmem_adj[i];
break;
}
@@ -144,13 +144,22 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
selected = p;
selected_tasksize = tasksize;
selected_oom_score_adj = oom_score_adj;
- lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n",
- p->pid, p->comm, oom_score_adj, tasksize);
+ lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
+ p->comm, p->pid, oom_score_adj, tasksize);
}
if (selected) {
- lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n",
- selected->pid, selected->comm,
- selected_oom_score_adj, selected_tasksize);
+ lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" \
+ " to free %ldkB on behalf of '%s' (%d) because\n" \
+ " cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \
+ " Free memory is %ldkB above reserved\n",
+ selected->comm, selected->pid,
+ selected_oom_score_adj,
+ selected_tasksize * (long)(PAGE_SIZE / 1024),
+ current->comm, current->pid,
+ other_file * (long)(PAGE_SIZE / 1024),
+ minfree * (long)(PAGE_SIZE / 1024),
+ min_score_adj,
+ other_free * (long)(PAGE_SIZE / 1024));
lowmem_deathpending_timeout = jiffies + HZ;
send_sig(SIGKILL, selected, 0);
set_tsk_thread_flag(selected, TIF_MEMDIE);
@@ -178,9 +187,94 @@ static void __exit lowmem_exit(void)
unregister_shrinker(&lowmem_shrinker);
}
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+static short lowmem_oom_adj_to_oom_score_adj(short oom_adj)
+{
+ if (oom_adj == OOM_ADJUST_MAX)
+ return OOM_SCORE_ADJ_MAX;
+ else
+ return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
+}
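+
+/*
+ * Worked example (illustrative): with OOM_ADJUST_MAX == 15,
+ * OOM_SCORE_ADJ_MAX == 1000 and OOM_DISABLE == -17, an oom_adj of 8
+ * maps to 8 * 1000 / 17 == 470, and oom_adj 15 maps straight to 1000.
+ */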
+
+static void lowmem_autodetect_oom_adj_values(void)
+{
+ int i;
+ short oom_adj;
+ short oom_score_adj;
+ int array_size = ARRAY_SIZE(lowmem_adj);
+
+ if (lowmem_adj_size < array_size)
+ array_size = lowmem_adj_size;
+
+ if (array_size <= 0)
+ return;
+
+ oom_adj = lowmem_adj[array_size - 1];
+ if (oom_adj > OOM_ADJUST_MAX)
+ return;
+
+ oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+ if (oom_score_adj <= OOM_ADJUST_MAX)
+ return;
+
+ lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n");
+ for (i = 0; i < array_size; i++) {
+ oom_adj = lowmem_adj[i];
+ oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+ lowmem_adj[i] = oom_score_adj;
+ lowmem_print(1, "oom_adj %d => oom_score_adj %d\n",
+ oom_adj, oom_score_adj);
+ }
+}
+
+static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_array_ops.set(val, kp);
+
+ /* HACK: Autodetect oom_adj values in lowmem_adj array */
+ lowmem_autodetect_oom_adj_values();
+
+ return ret;
+}
+
+static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp)
+{
+ return param_array_ops.get(buffer, kp);
+}
+
+static void lowmem_adj_array_free(void *arg)
+{
+ param_array_ops.free(arg);
+}
+
+static struct kernel_param_ops lowmem_adj_array_ops = {
+ .set = lowmem_adj_array_set,
+ .get = lowmem_adj_array_get,
+ .free = lowmem_adj_array_free,
+};
+
+static const struct kparam_array __param_arr_adj = {
+ .max = ARRAY_SIZE(lowmem_adj),
+ .num = &lowmem_adj_size,
+ .ops = &param_ops_short,
+ .elemsize = sizeof(lowmem_adj[0]),
+ .elem = lowmem_adj,
+};
+#endif
+
module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+__module_param_call(MODULE_PARAM_PREFIX, adj,
+ &lowmem_adj_array_ops,
+ .arr = &__param_arr_adj,
+ S_IRUGO | S_IWUSR, -1);
+__MODULE_PARM_TYPE(adj, "array of short");
+#else
module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size,
S_IRUGO | S_IWUSR);
+#endif
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
S_IRUGO | S_IWUSR);
module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index 585040be5f1..1a50669ec8a 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -18,10 +18,9 @@
#define _LINUX_SW_SYNC_H
#include <linux/types.h>
-
-#ifdef __KERNEL__
-
+#include <linux/kconfig.h>
#include "sync.h"
+#include "uapi/sw_sync.h"
struct sw_sync_timeline {
struct sync_timeline obj;
@@ -35,24 +34,26 @@ struct sw_sync_pt {
u32 value;
};
+#if IS_ENABLED(CONFIG_SW_SYNC)
struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
-
-#endif /* __KERNEL __ */
-
-struct sw_sync_create_fence_data {
- __u32 value;
- char name[32];
- __s32 fence; /* fd of new fence */
-};
-
-#define SW_SYNC_IOC_MAGIC 'W'
-
-#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
- struct sw_sync_create_fence_data)
-#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-
+#else
+static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+ return NULL;
+}
+
+static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+}
+
+static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
+ u32 value)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
#endif /* _LINUX_SW_SYNC_H */
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 3893a357476..d38305b4093 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -79,27 +79,27 @@ static void sync_timeline_free(struct kref *kref)
container_of(kref, struct sync_timeline, kref);
unsigned long flags;
- if (obj->ops->release_obj)
- obj->ops->release_obj(obj);
-
spin_lock_irqsave(&sync_timeline_list_lock, flags);
list_del(&obj->sync_timeline_list);
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+ if (obj->ops->release_obj)
+ obj->ops->release_obj(obj);
+
kfree(obj);
}
void sync_timeline_destroy(struct sync_timeline *obj)
{
obj->destroyed = true;
+ smp_wmb();
/*
- * If this is not the last reference, signal any children
- * that their parent is going away.
+ * signal any children that their parent is going away.
*/
+ sync_timeline_signal(obj);
- if (!kref_put(&obj->kref, sync_timeline_free))
- sync_timeline_signal(obj);
+ kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index 38ea986dc70..75da9e85ac6 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -14,14 +14,14 @@
#define _LINUX_SYNC_H
#include <linux/types.h>
-#ifdef __KERNEL__
-
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
+#include "uapi/sync.h"
+
struct sync_timeline;
struct sync_pt;
struct sync_fence;
@@ -341,86 +341,4 @@ int sync_fence_cancel_async(struct sync_fence *fence,
*/
int sync_fence_wait(struct sync_fence *fence, long timeout);
-#endif /* __KERNEL__ */
-
-/**
- * struct sync_merge_data - data passed to merge ioctl
- * @fd2: file descriptor of second fence
- * @name: name of new fence
- * @fence: returns the fd of the new fence to userspace
- */
-struct sync_merge_data {
- __s32 fd2; /* fd of second fence */
- char name[32]; /* name of new fence */
- __s32 fence; /* fd on newly created fence */
-};
-
-/**
- * struct sync_pt_info - detailed sync_pt information
- * @len: length of sync_pt_info including any driver_data
- * @obj_name: name of parent sync_timeline
- * @driver_name: name of driver implmenting the parent
- * @status: status of the sync_pt 0:active 1:signaled <0:error
- * @timestamp_ns: timestamp of status change in nanoseconds
- * @driver_data: any driver dependant data
- */
-struct sync_pt_info {
- __u32 len;
- char obj_name[32];
- char driver_name[32];
- __s32 status;
- __u64 timestamp_ns;
-
- __u8 driver_data[0];
-};
-
-/**
- * struct sync_fence_info_data - data returned from fence info ioctl
- * @len: ioctl caller writes the size of the buffer its passing in.
- * ioctl returns length of sync_fence_data reutnred to userspace
- * including pt_info.
- * @name: name of fence
- * @status: status of fence. 1: signaled 0:active <0:error
- * @pt_info: a sync_pt_info struct for every sync_pt in the fence
- */
-struct sync_fence_info_data {
- __u32 len;
- char name[32];
- __s32 status;
-
- __u8 pt_info[0];
-};
-
-#define SYNC_IOC_MAGIC '>'
-
-/**
- * DOC: SYNC_IOC_WAIT - wait for a fence to signal
- *
- * pass timeout in milliseconds. Waits indefinitely timeout < 0.
- */
-#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
-
-/**
- * DOC: SYNC_IOC_MERGE - merge two fences
- *
- * Takes a struct sync_merge_data. Creates a new fence containing copies of
- * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
- * new fence's fd in sync_merge_data.fence
- */
-#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
-
-/**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
- *
- * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len. On return, len is
- * updated to reflect the total size of the sync_fence_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To itterate over the sync_pt_infos, use the sync_pt_info.len field.
- */
-#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
- struct sync_fence_info_data)
-
#endif /* _LINUX_SYNC_H */
diff --git a/drivers/staging/android/uapi/android_alarm.h b/drivers/staging/android/uapi/android_alarm.h
new file mode 100644
index 00000000000..aa013f6f5f3
--- /dev/null
+++ b/drivers/staging/android/uapi/android_alarm.h
@@ -0,0 +1,62 @@
+/* drivers/staging/android/uapi/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ANDROID_ALARM_H
+#define _UAPI_LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+enum android_alarm_type {
+ /* return code bit numbers or set alarm arg */
+ ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME,
+
+ ANDROID_ALARM_TYPE_COUNT,
+
+ /* return code bit numbers */
+ /* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+enum android_alarm_return_flags {
+ ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+ ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT _IO('a', 1)
+
+#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
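+
+/*
+ * Worked example (illustrative): ANDROID_ALARM_SET(ANDROID_ALARM_RTC)
+ * encodes command number 2 | (ANDROID_ALARM_RTC << 4) == 0x12, so
+ * ANDROID_ALARM_IOCTL_TO_TYPE() recovers 0x12 >> 4 == ANDROID_ALARM_RTC
+ * and ANDROID_ALARM_BASE_CMD() masks the type back out of the cmd.
+ */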
+
+#endif
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
new file mode 100644
index 00000000000..ba4743c71d6
--- /dev/null
+++ b/drivers/staging/android/uapi/ashmem.h
@@ -0,0 +1,47 @@
+/*
+ * drivers/staging/android/uapi/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed. It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _UAPI_LINUX_ASHMEM_H
+#define _UAPI_LINUX_ASHMEM_H
+
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN 256
+
+#define ASHMEM_NAME_DEF "dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED 0
+#define ASHMEM_WAS_PURGED 1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED 0
+#define ASHMEM_IS_PINNED 1
+
+struct ashmem_pin {
+ __u32 offset; /* offset into region, in bytes, page-aligned */
+ __u32 len; /* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC 0x77
+
+#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
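+
+/*
+ * Illustrative userspace flow (an assumption, not part of this patch):
+ * name and size must be set before the region is mapped.
+ *
+ * int fd = open("/dev/ashmem", O_RDWR);
+ * ioctl(fd, ASHMEM_SET_NAME, "my-region");
+ * ioctl(fd, ASHMEM_SET_SIZE, 4096);
+ * void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ * struct ashmem_pin pin = { .offset = 0, .len = 4096 };
+ * ioctl(fd, ASHMEM_UNPIN, &pin);
+ * if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
+ * the contents were reclaimed and must be regenerated
+ */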
+
+#endif /* _UAPI_LINUX_ASHMEM_H */
diff --git a/drivers/staging/android/uapi/binder.h b/drivers/staging/android/uapi/binder.h
new file mode 100644
index 00000000000..b6cb483592c
--- /dev/null
+++ b/drivers/staging/android/uapi/binder.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_BINDER_H
+#define _UAPI_LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+ ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+ BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+ FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes. The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur. The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+ /* 8 bytes for large_flat_header. */
+ unsigned long type;
+ unsigned long flags;
+
+ /* 8 bytes of data. */
+ union {
+ void __user *binder; /* local object */
+ signed long handle; /* remote object */
+ };
+
+ /* extra data associated with local object */
+ void __user *cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bits the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+ signed long write_size; /* bytes to write */
+ signed long write_consumed; /* bytes consumed by driver */
+ unsigned long write_buffer;
+ signed long read_size; /* bytes to read */
+ signed long read_consumed; /* bytes consumed by driver */
+ unsigned long read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+ /* driver protocol version -- increment with incompatible change */
+ signed long protocol_version;
+};
+
+/* This is the current protocol version. */
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+
+#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
+#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
+#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
+#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
+#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
+#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted. This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process. That is, the process is being destroyed.
+ * You should handle this by exiting from your process. Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
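+/*
+ * Illustrative sketch (an assumption, not part of this patch): handling
+ * the two special error codes around BINDER_WRITE_READ.
+ *
+ * do {
+ * ret = ioctl(fd, BINDER_WRITE_READ, &bwr);
+ * } while (ret < 0 && errno == EINTR);
+ *
+ * A ret < 0 with errno == ECONNREFUSED means the driver no longer
+ * accepts calls from this process, so the process should exit.
+ */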
+
+enum transaction_flags {
+ TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
+ TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
+ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
+ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+ /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+ * identifying the target and contents of the transaction.
+ */
+ union {
+ size_t handle; /* target descriptor of command transaction */
+ void *ptr; /* target descriptor of return transaction */
+ } target;
+ void *cookie; /* target object cookie */
+ unsigned int code; /* transaction command */
+
+ /* General information about the transaction. */
+ unsigned int flags;
+ pid_t sender_pid;
+ uid_t sender_euid;
+ size_t data_size; /* number of bytes of data */
+ size_t offsets_size; /* number of bytes of offsets */
+
+ /* If this transaction is inline, the data immediately
+ * follows here; otherwise, it ends with a pointer to
+ * the data buffer.
+ */
+ union {
+ struct {
+ /* transaction data */
+ const void __user *buffer;
+ /* offsets from buffer to flat_binder_object structs */
+ const void __user *offsets;
+ } ptr;
+ uint8_t buf[8];
+ } data;
+};
+
+struct binder_ptr_cookie {
+ void *ptr;
+ void *cookie;
+};
+
+struct binder_pri_desc {
+ int priority;
+ int desc;
+};
+
+struct binder_pri_ptr_cookie {
+ int priority;
+ void *ptr;
+ void *cookie;
+};
+
+enum binder_driver_return_protocol {
+ BR_ERROR = _IOR('r', 0, int),
+ /*
+ * int: error code
+ */
+
+ BR_OK = _IO('r', 1),
+ /* No parameters! */
+
+ BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+ BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the received command.
+ */
+
+ BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+ /*
+ * not currently supported
+ * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+ * Else the remote object has acquired a primary reference.
+ */
+
+ BR_DEAD_REPLY = _IO('r', 5),
+ /*
+ * The target of the last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
+ */
+
+ BR_TRANSACTION_COMPLETE = _IO('r', 6),
+ /*
+ * No parameters... always refers to the last transaction requested
+ * (including replies). Note that this will be sent even for
+ * asynchronous transactions.
+ */
+
+ BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+ BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+ BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+ BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+ /*
+ * not currently supported
+ * int: priority
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_NOOP = _IO('r', 12),
+ /*
+ * No parameters. Do nothing and examine the next command. It exists
+ * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+ */
+
+ BR_SPAWN_LOOPER = _IO('r', 13),
+ /*
+ * No parameters. The driver has determined that a process has no
+ * threads waiting to service incoming transactions. When a process
+ * receives this command, it must spawn a new service thread and
+ * register it via bcENTER_LOOPER.
+ */
+
+ BR_FINISHED = _IO('r', 14),
+ /*
+ * not currently supported
+ * stop threadpool thread
+ */
+
+ BR_DEAD_BINDER = _IOR('r', 15, void *),
+ /*
+ * void *: cookie
+ */
+ BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
+ /*
+ * void *: cookie
+ */
+
+ BR_FAILED_REPLY = _IO('r', 17),
+ /*
+ * The last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
+ */
+};
+
+enum binder_driver_command_protocol {
+ BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+ BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the sent command.
+ */
+
+ BC_ACQUIRE_RESULT = _IOW('c', 2, int),
+ /*
+ * not currently supported
+ * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+ * Else you have acquired a primary reference on the object.
+ */
+
+ BC_FREE_BUFFER = _IOW('c', 3, int),
+ /*
+ * void *: ptr to transaction data received on a read
+ */
+
+ BC_INCREFS = _IOW('c', 4, int),
+ BC_ACQUIRE = _IOW('c', 5, int),
+ BC_RELEASE = _IOW('c', 6, int),
+ BC_DECREFS = _IOW('c', 7, int),
+ /*
+ * int: descriptor
+ */
+
+ BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+ BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+ /*
+ * not currently supported
+ * int: priority
+ * int: descriptor
+ */
+
+ BC_REGISTER_LOOPER = _IO('c', 11),
+ /*
+ * No parameters.
+ * Register a spawned looper thread with the device.
+ */
+
+ BC_ENTER_LOOPER = _IO('c', 12),
+ BC_EXIT_LOOPER = _IO('c', 13),
+ /*
+ * No parameters.
+ * These two commands are sent as an application-level thread
+ * enters and exits the binder loop, respectively. They are
+ * used so the binder can have an accurate count of the number
+ * of looping threads it has available.
+ */
+
+ BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie
+ */
+
+ BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie
+ */
+
+ BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
+ /*
+ * void *: cookie
+ */
+};
+
+#endif /* _UAPI_LINUX_BINDER_H */
+
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
new file mode 100644
index 00000000000..f09e7c154d6
--- /dev/null
+++ b/drivers/staging/android/uapi/ion.h
@@ -0,0 +1,196 @@
+/*
+ * drivers/staging/android/uapi/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a pre-reserved
+ * carveout heap, allocations are physically
+ * contiguous
+ * @ION_HEAP_TYPE_DMA: memory allocated via DMA API
+ * @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask
+ * is used to identify the heaps, so only 32
+ * total heap types are supported
+ */
+enum ion_heap_type {
+ ION_HEAP_TYPE_SYSTEM,
+ ION_HEAP_TYPE_SYSTEM_CONTIG,
+ ION_HEAP_TYPE_CARVEOUT,
+ ION_HEAP_TYPE_CHUNK,
+ ION_HEAP_TYPE_DMA,
+ ION_HEAP_TYPE_CUSTOM, /* must be last so device-specific heaps
+ are always at the end of this enum */
+ ION_NUM_HEAPS = 16,
+};
+
+#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
+
+#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
+ cached, ion will do cache
+ maintenance when the buffer is
+ mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will be
+ created at mmap time, if this is
+ set caches must be managed
+ manually */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * Create a client by opening /dev/ion. Most operations are handled
+ * via the following ioctls; an illustrative usage sketch follows this
+ * comment.
+ *
+ */
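+/*
+ * Illustrative flow (an assumption, not part of this patch; the heap
+ * mask chosen here is only an example):
+ *
+ * int ion_fd = open("/dev/ion", O_RDWR);
+ * struct ion_allocation_data alloc_data = {
+ * .len = 4096,
+ * .align = 4096,
+ * .heap_id_mask = ION_HEAP_SYSTEM_MASK,
+ * .flags = ION_FLAG_CACHED,
+ * };
+ * ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);
+ * struct ion_fd_data fd_data = { .handle = alloc_data.handle };
+ * ioctl(ion_fd, ION_IOC_SHARE, &fd_data);
+ * void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
+ * fd_data.fd, 0);
+ * struct ion_handle_data free_data = { .handle = alloc_data.handle };
+ * ioctl(ion_fd, ION_IOC_FREE, &free_data);
+ */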
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len: size of the allocation
+ * @align: required alignment of the allocation
+ * @heap_id_mask: mask of heap ids to allocate from
+ * @flags: flags passed to heap
+ * @handle: populated with a cookie used to refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+ size_t len;
+ size_t align;
+ unsigned int heap_id_mask;
+ unsigned int flags;
+ ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle: a handle
+ * @fd: a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+ ion_user_handle_t handle;
+ int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle: a handle
+ */
+struct ion_handle_data {
+ ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd: the custom ioctl function to call
+ * @arg: additional data to pass to the custom ioctl, typically a user
+ * pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+ unsigned int cmd;
+ unsigned long arg;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be passed to another process. The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma_buf APIs correctly (syncing
+ * will happen automatically when the buffer is mapped to a device).
+ * If necessary it should be used after touching a cached buffer from the
+ * cpu; this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _UAPI_LINUX_ION_H */
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
new file mode 100644
index 00000000000..ffef06f6313
--- /dev/null
+++ b/drivers/staging/android/uapi/ion_test.h
@@ -0,0 +1,70 @@
+/*
+ * drivers/staging/android/uapi/ion_test.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_TEST_H
+#define _UAPI_LINUX_ION_TEST_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct ion_test_rw_data - metadata passed to the kernel to read or write
+ * a handle
+ * @ptr: a pointer to an area at least as large as size
+ * @offset: offset into the ion buffer to start reading
+ * @size: size to read or write
+ * @write: 1 to write, 0 to read
+ */
+struct ion_test_rw_data {
+ __u64 ptr;
+ __u64 offset;
+ __u64 size;
+ int write;
+ int __padding;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_TEST_SET_FD - attach a dma-buf fd to the test driver
+ *
+ * Attaches a dma buf fd to the test driver. Passing a second fd or -1 will
+ * release the first fd.
+ */
+#define ION_IOC_TEST_SET_FD \
+ _IO(ION_IOC_MAGIC, 0xf0)
+
+/**
+ * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
+ *
+ * Reads or writes the memory from a handle using an uncached mapping. Can be
+ * used by unit tests to emulate a DMA engine as close as possible. Only
+ * expected to be used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_DMA_MAPPING \
+ _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
+
+/**
+ * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
+ *
+ * Reads or writes the memory from a handle using a kernel mapping. Can be
+ * used by unit tests to test heap map_kernel functions. Only expected to be
+ * used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_KERNEL_MAPPING \
+ _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
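+
+/*
+ * Illustrative test flow (an assumption, not part of this patch):
+ *
+ * int tfd = open("/dev/ion-test", O_RDWR);
+ * ioctl(tfd, ION_IOC_TEST_SET_FD, dma_buf_fd);
+ * struct ion_test_rw_data rw = {
+ * .ptr = (__u64)(unsigned long)buf,
+ * .offset = 0,
+ * .size = 4096,
+ * .write = 0,
+ * };
+ * ioctl(tfd, ION_IOC_TEST_DMA_MAPPING, &rw);
+ * ioctl(tfd, ION_IOC_TEST_SET_FD, -1);
+ */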
+
+#endif /* _UAPI_LINUX_ION_TEST_H */
diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h
new file mode 100644
index 00000000000..9b5d4869505
--- /dev/null
+++ b/drivers/staging/android/uapi/sw_sync.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SW_SYNC_H
+#define _UAPI_LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+struct sw_sync_create_fence_data {
+ __u32 value;
+ char name[32];
+ __s32 fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC 'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+ struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
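+
+/*
+ * Illustrative flow (an assumption, not part of this patch): each open of
+ * /dev/sw_sync is a timeline; a fence is created at a value and signals
+ * once the timeline counter reaches it.
+ *
+ * int tl = open("/dev/sw_sync", O_RDWR);
+ * struct sw_sync_create_fence_data data = { .value = 1, .name = "f" };
+ * ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data);
+ * __u32 inc = 1;
+ * ioctl(tl, SW_SYNC_IOC_INC, &inc);
+ * data.fence is now signaled
+ */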
+
+#endif /* _UAPI_LINUX_SW_SYNC_H */
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
new file mode 100644
index 00000000000..57fdaadc4b0
--- /dev/null
+++ b/drivers/staging/android/uapi/sync.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SYNC_H
+#define _UAPI_LINUX_SYNC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2: file descriptor of second fence
+ * @name: name of new fence
+ * @fence: returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+ __s32 fd2; /* fd of second fence */
+ char name[32]; /* name of new fence */
+ __s32 fence; /* fd on newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len: length of sync_pt_info including any driver_data
+ * @obj_name: name of parent sync_timeline
+ * @driver_name: name of driver implementing the parent
+ * @status: status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns: timestamp of status change in nanoseconds
+ * @driver_data: any driver dependant data
+ */
+struct sync_pt_info {
+ __u32 len;
+ char obj_name[32];
+ char driver_name[32];
+ __s32 status;
+ __u64 timestamp_ns;
+
+ __u8 driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len: the ioctl caller writes the size of the buffer it is passing in;
+ *	on return it holds the length of the sync_fence_info_data returned
+ *	to userspace, including pt_info.
+ * @name: name of fence
+ * @status: status of fence. 1:signaled 0:active <0:error
+ * @pt_info: a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+ __u32 len;
+ char name[32];
+ __s32 status;
+
+ __u8 pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC '>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * Pass the timeout in milliseconds; a timeout < 0 waits indefinitely.
+ */
+#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data. Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len. On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
+ struct sync_fence_info_data)
+
+#endif /* _UAPI_LINUX_SYNC_H */
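
The three ioctls above compose naturally: wait on a fence, then query it and walk its variable-length sync_pt_info records using each record's len field, exactly as the SYNC_IOC_FENCE_INFO comment describes. A sketch, with an arbitrary 4 KiB info buffer:

    #include <linux/types.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    /* plus this header for the structs and SYNC_IOC_* ioctls */

    static void dump_fence(int fence_fd)
    {
        struct sync_fence_info_data *info;
        struct sync_pt_info *pt;
        __s32 timeout = 1000;       /* milliseconds; < 0 waits forever */
        __u32 consumed;

        if (ioctl(fence_fd, SYNC_IOC_WAIT, &timeout) < 0)
            return;

        info = calloc(1, 4096);
        if (!info)
            return;
        info->len = 4096;           /* caller writes the buffer size in */
        if (ioctl(fence_fd, SYNC_IOC_FENCE_INFO, info) < 0)
            goto out;

        printf("fence %s status %d\n", info->name, info->status);

        /* pt_info entries are variable length; advance by pt->len. */
        pt = (struct sync_pt_info *)info->pt_info;
        consumed = sizeof(*info);
        while (consumed < info->len) {
            printf("  pt %s/%s status %d @ %llu ns\n", pt->obj_name,
                   pt->driver_name, pt->status,
                   (unsigned long long)pt->timestamp_ns);
            consumed += pt->len;
            pt = (struct sync_pt_info *)((__u8 *)pt + pt->len);
        }
    out:
        free(info);
    }
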
diff --git a/drivers/switch/Kconfig b/drivers/switch/Kconfig
new file mode 100644
index 00000000000..19404b6f777
--- /dev/null
+++ b/drivers/switch/Kconfig
@@ -0,0 +1,15 @@
+menuconfig SWITCH
+ tristate "Switch class support"
+ help
+	  Say Y here to enable switch class support. This allows
+	  userspace to monitor switches via sysfs and uevent.
+
+if SWITCH
+
+config SWITCH_GPIO
+	tristate "GPIO Switch support"
+ depends on GPIOLIB
+ help
+ Say Y here to enable GPIO based switch support.
+
+endif # SWITCH
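
Once a switch is registered, its state is readable from /sys/class/switch/<name>/state (see switch_class.c below). A minimal userspace read, using a hypothetical switch named "h2w":

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[16];
        ssize_t n;
        int fd = open("/sys/class/switch/h2w/state", O_RDONLY);

        if (fd < 0)
            return 1;
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
            buf[n] = '\0';
            printf("switch state: %s", buf);
        }
        close(fd);
        return 0;
    }
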
diff --git a/drivers/switch/Makefile b/drivers/switch/Makefile
new file mode 100644
index 00000000000..f7606ed4a71
--- /dev/null
+++ b/drivers/switch/Makefile
@@ -0,0 +1,4 @@
+# Switch Class Driver
+obj-$(CONFIG_SWITCH) += switch_class.o
+obj-$(CONFIG_SWITCH_GPIO) += switch_gpio.o
+
diff --git a/drivers/switch/switch_class.c b/drivers/switch/switch_class.c
new file mode 100644
index 00000000000..e373b625806
--- /dev/null
+++ b/drivers/switch/switch_class.c
@@ -0,0 +1,174 @@
+/*
+ * drivers/switch/switch_class.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/switch.h>
+
+struct class *switch_class;
+static atomic_t device_count;
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct switch_dev *sdev = (struct switch_dev *)
+ dev_get_drvdata(dev);
+
+ if (sdev->print_state) {
+ int ret = sdev->print_state(sdev, buf);
+ if (ret >= 0)
+ return ret;
+ }
+ return sprintf(buf, "%d\n", sdev->state);
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct switch_dev *sdev = (struct switch_dev *)
+ dev_get_drvdata(dev);
+
+ if (sdev->print_name) {
+ int ret = sdev->print_name(sdev, buf);
+ if (ret >= 0)
+ return ret;
+ }
+ return sprintf(buf, "%s\n", sdev->name);
+}
+
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO, name_show, NULL);
+
+void switch_set_state(struct switch_dev *sdev, int state)
+{
+ char name_buf[120];
+ char state_buf[120];
+ char *prop_buf;
+ char *envp[3];
+ int env_offset = 0;
+ int length;
+
+ if (sdev->state != state) {
+ sdev->state = state;
+
+ prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+ if (prop_buf) {
+ length = name_show(sdev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(name_buf, sizeof(name_buf),
+ "SWITCH_NAME=%s", prop_buf);
+ envp[env_offset++] = name_buf;
+ }
+ length = state_show(sdev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(state_buf, sizeof(state_buf),
+ "SWITCH_STATE=%s", prop_buf);
+ envp[env_offset++] = state_buf;
+ }
+ envp[env_offset] = NULL;
+ kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp);
+ free_page((unsigned long)prop_buf);
+ } else {
+ printk(KERN_ERR "out of memory in switch_set_state\n");
+ kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(switch_set_state);
+
+static int create_switch_class(void)
+{
+ if (!switch_class) {
+ switch_class = class_create(THIS_MODULE, "switch");
+ if (IS_ERR(switch_class))
+ return PTR_ERR(switch_class);
+ atomic_set(&device_count, 0);
+ }
+
+ return 0;
+}
+
+int switch_dev_register(struct switch_dev *sdev)
+{
+ int ret;
+
+ if (!switch_class) {
+ ret = create_switch_class();
+ if (ret < 0)
+ return ret;
+ }
+
+ sdev->index = atomic_inc_return(&device_count);
+ sdev->dev = device_create(switch_class, NULL,
+ MKDEV(0, sdev->index), NULL, sdev->name);
+ if (IS_ERR(sdev->dev))
+ return PTR_ERR(sdev->dev);
+
+ ret = device_create_file(sdev->dev, &dev_attr_state);
+ if (ret < 0)
+ goto err_create_file_1;
+ ret = device_create_file(sdev->dev, &dev_attr_name);
+ if (ret < 0)
+ goto err_create_file_2;
+
+ dev_set_drvdata(sdev->dev, sdev);
+ sdev->state = 0;
+ return 0;
+
+err_create_file_2:
+ device_remove_file(sdev->dev, &dev_attr_state);
+err_create_file_1:
+ device_destroy(switch_class, MKDEV(0, sdev->index));
+ printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(switch_dev_register);
+
+void switch_dev_unregister(struct switch_dev *sdev)
+{
+ device_remove_file(sdev->dev, &dev_attr_name);
+ device_remove_file(sdev->dev, &dev_attr_state);
+ device_destroy(switch_class, MKDEV(0, sdev->index));
+ dev_set_drvdata(sdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(switch_dev_unregister);
+
+static int __init switch_class_init(void)
+{
+ return create_switch_class();
+}
+
+static void __exit switch_class_exit(void)
+{
+ class_destroy(switch_class);
+}
+
+module_init(switch_class_init);
+module_exit(switch_class_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Switch class driver");
+MODULE_LICENSE("GPL");
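
For a driver wanting to publish a switch, the API above reduces to switch_dev_register() plus switch_set_state() on every state change; switch_dev_unregister() undoes the registration on teardown. A hedged sketch of a hypothetical consumer (the "h2w" name is only an example):

    #include <linux/switch.h>

    /* Hypothetical consumer: register a switch, then report changes. */
    static struct switch_dev headset_sdev = {
        .name = "h2w",      /* appears as /sys/class/switch/h2w */
    };

    static int headset_setup(void)
    {
        int ret = switch_dev_register(&headset_sdev);

        if (ret < 0)
            return ret;

        /* Later, e.g. from an IRQ-driven work item: this updates the
         * sysfs state file and fires a KOBJ_CHANGE uevent carrying
         * SWITCH_NAME= and SWITCH_STATE=, per switch_set_state(). */
        switch_set_state(&headset_sdev, 1);
        return 0;
    }
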
diff --git a/drivers/switch/switch_gpio.c b/drivers/switch/switch_gpio.c
new file mode 100644
index 00000000000..621d62d20c9
--- /dev/null
+++ b/drivers/switch/switch_gpio.c
@@ -0,0 +1,172 @@
+/*
+ * drivers/switch/switch_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/switch.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+
+struct gpio_switch_data {
+ struct switch_dev sdev;
+ unsigned gpio;
+ const char *name_on;
+ const char *name_off;
+ const char *state_on;
+ const char *state_off;
+ int irq;
+ struct work_struct work;
+};
+
+static void gpio_switch_work(struct work_struct *work)
+{
+ int state;
+ struct gpio_switch_data *data =
+ container_of(work, struct gpio_switch_data, work);
+
+ state = gpio_get_value(data->gpio);
+ switch_set_state(&data->sdev, state);
+}
+
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_switch_data *switch_data =
+ (struct gpio_switch_data *)dev_id;
+
+ schedule_work(&switch_data->work);
+ return IRQ_HANDLED;
+}
+
+static ssize_t switch_gpio_print_state(struct switch_dev *sdev, char *buf)
+{
+ struct gpio_switch_data *switch_data =
+ container_of(sdev, struct gpio_switch_data, sdev);
+ const char *state;
+ if (switch_get_state(sdev))
+ state = switch_data->state_on;
+ else
+ state = switch_data->state_off;
+
+ if (state)
+ return sprintf(buf, "%s\n", state);
+ return -1;
+}
+
+static int gpio_switch_probe(struct platform_device *pdev)
+{
+ struct gpio_switch_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_switch_data *switch_data;
+ int ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ switch_data = kzalloc(sizeof(struct gpio_switch_data), GFP_KERNEL);
+ if (!switch_data)
+ return -ENOMEM;
+
+ switch_data->sdev.name = pdata->name;
+ switch_data->gpio = pdata->gpio;
+ switch_data->name_on = pdata->name_on;
+ switch_data->name_off = pdata->name_off;
+ switch_data->state_on = pdata->state_on;
+ switch_data->state_off = pdata->state_off;
+ switch_data->sdev.print_state = switch_gpio_print_state;
+
+ ret = switch_dev_register(&switch_data->sdev);
+ if (ret < 0)
+ goto err_switch_dev_register;
+
+ ret = gpio_request(switch_data->gpio, pdev->name);
+ if (ret < 0)
+ goto err_request_gpio;
+
+ ret = gpio_direction_input(switch_data->gpio);
+ if (ret < 0)
+ goto err_set_gpio_input;
+
+ INIT_WORK(&switch_data->work, gpio_switch_work);
+
+ switch_data->irq = gpio_to_irq(switch_data->gpio);
+ if (switch_data->irq < 0) {
+ ret = switch_data->irq;
+ goto err_detect_irq_num_failed;
+ }
+
+ ret = request_irq(switch_data->irq, gpio_irq_handler,
+ IRQF_TRIGGER_LOW, pdev->name, switch_data);
+ if (ret < 0)
+ goto err_request_irq;
+
+	/* Perform initial detection */
+	gpio_switch_work(&switch_data->work);
+
+	/* stash switch_data so gpio_switch_remove() can retrieve it */
+	platform_set_drvdata(pdev, switch_data);
+
+	return 0;
+
+err_request_irq:
+err_detect_irq_num_failed:
+err_set_gpio_input:
+ gpio_free(switch_data->gpio);
+err_request_gpio:
+ switch_dev_unregister(&switch_data->sdev);
+err_switch_dev_register:
+ kfree(switch_data);
+
+ return ret;
+}
+
+static int gpio_switch_remove(struct platform_device *pdev)
+{
+ struct gpio_switch_data *switch_data = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&switch_data->work);
+ gpio_free(switch_data->gpio);
+ switch_dev_unregister(&switch_data->sdev);
+ kfree(switch_data);
+
+ return 0;
+}
+
+static struct platform_driver gpio_switch_driver = {
+ .probe = gpio_switch_probe,
+ .remove = gpio_switch_remove,
+ .driver = {
+ .name = "switch-gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init gpio_switch_init(void)
+{
+ return platform_driver_register(&gpio_switch_driver);
+}
+
+static void __exit gpio_switch_exit(void)
+{
+ platform_driver_unregister(&gpio_switch_driver);
+}
+
+module_init(gpio_switch_init);
+module_exit(gpio_switch_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("GPIO Switch driver");
+MODULE_LICENSE("GPL");
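
This driver binds by platform-device name, so a board file would register a "switch-gpio" device carrying gpio_switch_platform_data. That struct lives in linux/switch.h, which is not part of this hunk; the field names below are inferred from what gpio_switch_probe() reads, and the GPIO number is a placeholder:

    #include <linux/platform_device.h>
    #include <linux/switch.h>

    /* Field names mirror what gpio_switch_probe() reads from pdata;
     * the GPIO number is a placeholder. */
    static struct gpio_switch_platform_data headset_switch_data = {
        .name      = "h2w",
        .gpio      = 28,
        .state_on  = "1",
        .state_off = "0",
    };

    static struct platform_device headset_switch_device = {
        .name = "switch-gpio",  /* matches gpio_switch_driver.driver.name */
        .id   = -1,
        .dev  = {
            .platform_data = &headset_switch_data,
        },
    };

    static int __init board_switch_init(void)
    {
        return platform_device_register(&headset_switch_device);
    }
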
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index f87dbfd3277..7855f3a4ad0 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -95,6 +95,9 @@ static void __uart_start(struct tty_struct *tty)
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
+ if (port->ops->wake_peer)
+ port->ops->wake_peer(port);
+
if (!uart_circ_empty(&state->xmit) && state->xmit.buf &&
!tty->stopped && !tty->hw_stopped)
port->ops->start_tx(port);
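
The new wake_peer hook gives a UART driver a chance to wake the device on the other end of the line (commonly a Bluetooth chip behind a sleep GPIO) before transmission starts. A hedged sketch of the driver side, assuming the series also adds a wake_peer member to struct uart_ops in serial_core.h (not shown in this hunk); the port wrapper and GPIO name are hypothetical:

    #include <linux/gpio.h>
    #include <linux/serial_core.h>

    /* Hypothetical port wrapper; bt_wake_gpio is a placeholder. */
    struct my_uart_port {
        struct uart_port port;
        int bt_wake_gpio;
    };

    /* Called by __uart_start() before start_tx(): deassert the peer's
     * sleep line so it is awake for the data we are about to send. */
    static void my_uart_wake_peer(struct uart_port *port)
    {
        struct my_uart_port *mp =
            container_of(port, struct my_uart_port, port);

        gpio_set_value(mp->bt_wake_gpio, 1);
    }

    static const struct uart_ops my_uart_ops = {
        /* ...the usual mandatory callbacks (tx_empty, start_tx, ...)... */
        .wake_peer = my_uart_wake_peer,
    };
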
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index f41aa0d0c41..67409fda70d 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -818,6 +818,24 @@ config USB_G_PRINTER
For more information, see Documentation/usb/gadget_printer.txt
which includes sample code for accessing the device file.
+config USB_G_ANDROID
+	bool "Android Composite Gadget"
+ select USB_F_ACM
+ select USB_LIBCOMPOSITE
+ select USB_U_SERIAL
+ help
+ The Android Composite Gadget supports multiple USB
+ functions: adb, acm, mass storage, mtp, accessory
+ and rndis.
+ Each function can be configured and enabled/disabled
+ dynamically from userspace through a sysfs interface.
+
+config USB_ANDROID_RNDIS_DWORD_ALIGNED
+	bool "Use double-word aligned RNDIS structures"
+	depends on USB_G_ANDROID
+	help
+	  Align RNDIS data on double-word (4-byte) boundaries, for
+	  DMA controllers that require it.
+
if TTY
config USB_CDC_COMPOSITE
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 6afd16659e7..0ec50ae5b63 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -65,6 +65,7 @@ g_nokia-y := nokia.o
g_webcam-y := webcam.o
g_ncm-y := ncm.o
g_acm_ms-y := acm_ms.o
+g_android-y := android.o
g_tcm_usb_gadget-y := tcm_usb_gadget.o
obj-$(CONFIG_USB_ZERO) += g_zero.o
@@ -84,4 +85,5 @@ obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o
obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o
obj-$(CONFIG_USB_G_NCM) += g_ncm.o
obj-$(CONFIG_USB_G_ACM_MS) += g_acm_ms.o
+obj-$(CONFIG_USB_G_ANDROID) += g_android.o
obj-$(CONFIG_USB_GADGET_TARGET) += tcm_usb_gadget.o
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
new file mode 100644
index 00000000000..04cbeb13481
--- /dev/null
+++ b/drivers/usb/gadget/android.c
@@ -0,0 +1,1519 @@
+/*
+ * Gadget Driver for Android
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ * Benoit Goby <benoit@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+
+#include "gadget_chips.h"
+
+#include "f_fs.c"
+#include "f_audio_source.c"
+#include "f_mass_storage.c"
+#include "f_mtp.c"
+#include "f_accessory.c"
+#define USB_ETH_RNDIS y
+#include "f_rndis.c"
+#include "rndis.c"
+#include "u_ether.c"
+
+MODULE_AUTHOR("Mike Lockwood");
+MODULE_DESCRIPTION("Android Composite USB Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+static const char longname[] = "Gadget Android";
+
+/* Default vendor and product IDs, overridden by userspace */
+#define VENDOR_ID 0x18D1
+#define PRODUCT_ID 0x0001
+
+struct android_usb_function {
+ char *name;
+ void *config;
+
+ struct device *dev;
+ char *dev_name;
+ struct device_attribute **attributes;
+
+ /* for android_dev.enabled_functions */
+ struct list_head enabled_list;
+
+ /* Optional: initialization during gadget bind */
+ int (*init)(struct android_usb_function *, struct usb_composite_dev *);
+ /* Optional: cleanup during gadget unbind */
+ void (*cleanup)(struct android_usb_function *);
+ /* Optional: called when the function is added the list of
+ * enabled functions */
+ void (*enable)(struct android_usb_function *);
+ /* Optional: called when it is removed */
+ void (*disable)(struct android_usb_function *);
+
+ int (*bind_config)(struct android_usb_function *,
+ struct usb_configuration *);
+
+ /* Optional: called when the configuration is removed */
+ void (*unbind_config)(struct android_usb_function *,
+ struct usb_configuration *);
+ /* Optional: handle ctrl requests before the device is configured */
+ int (*ctrlrequest)(struct android_usb_function *,
+ struct usb_composite_dev *,
+ const struct usb_ctrlrequest *);
+};
+
+struct android_dev {
+ struct android_usb_function **functions;
+ struct list_head enabled_functions;
+ struct usb_composite_dev *cdev;
+ struct device *dev;
+
+ bool enabled;
+ int disable_depth;
+ struct mutex mutex;
+ bool connected;
+ bool sw_connected;
+ struct work_struct work;
+ char ffs_aliases[256];
+};
+
+static struct class *android_class;
+static struct android_dev *_android_dev;
+static int android_bind_config(struct usb_configuration *c);
+static void android_unbind_config(struct usb_configuration *c);
+
+/* string IDs are assigned dynamically */
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+#define STRING_SERIAL_IDX 2
+
+static char manufacturer_string[256];
+static char product_string[256];
+static char serial_string[256];
+
+/* String Table */
+static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = manufacturer_string,
+ [STRING_PRODUCT_IDX].s = product_string,
+ [STRING_SERIAL_IDX].s = serial_string,
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+ &stringtab_dev,
+ NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+ .bLength = sizeof(device_desc),
+ .bDescriptorType = USB_DT_DEVICE,
+ .bcdUSB = __constant_cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_PER_INTERFACE,
+ .idVendor = __constant_cpu_to_le16(VENDOR_ID),
+ .idProduct = __constant_cpu_to_le16(PRODUCT_ID),
+ .bcdDevice = __constant_cpu_to_le16(0xffff),
+ .bNumConfigurations = 1,
+};
+
+static struct usb_configuration android_config_driver = {
+ .label = "android",
+ .unbind = android_unbind_config,
+ .bConfigurationValue = 1,
+ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+	.MaxPower	= 500, /* 500 mA */
+};
+
+static void android_work(struct work_struct *data)
+{
+ struct android_dev *dev = container_of(data, struct android_dev, work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL };
+ char *connected[2] = { "USB_STATE=CONNECTED", NULL };
+ char *configured[2] = { "USB_STATE=CONFIGURED", NULL };
+ char **uevent_envp = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cdev->lock, flags);
+ if (cdev->config)
+ uevent_envp = configured;
+ else if (dev->connected != dev->sw_connected)
+ uevent_envp = dev->connected ? connected : disconnected;
+ dev->sw_connected = dev->connected;
+ spin_unlock_irqrestore(&cdev->lock, flags);
+
+ if (uevent_envp) {
+ kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, uevent_envp);
+ pr_info("%s: sent uevent %s\n", __func__, uevent_envp[0]);
+ } else {
+ pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
+ dev->connected, dev->sw_connected, cdev->config);
+ }
+}
+
+static void android_enable(struct android_dev *dev)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ if (WARN_ON(!dev->disable_depth))
+ return;
+
+ if (--dev->disable_depth == 0) {
+ usb_add_config(cdev, &android_config_driver,
+ android_bind_config);
+ usb_gadget_connect(cdev->gadget);
+ }
+}
+
+static void android_disable(struct android_dev *dev)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ if (dev->disable_depth++ == 0) {
+ usb_gadget_disconnect(cdev->gadget);
+ /* Cancel pending control requests */
+ usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
+ usb_remove_config(cdev, &android_config_driver);
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+/* Supported functions initialization */
+
+struct functionfs_config {
+ bool opened;
+ bool enabled;
+ struct ffs_data *data;
+};
+
+static int ffs_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ f->config = kzalloc(sizeof(struct functionfs_config), GFP_KERNEL);
+ if (!f->config)
+ return -ENOMEM;
+
+ return functionfs_init();
+}
+
+static void ffs_function_cleanup(struct android_usb_function *f)
+{
+ functionfs_cleanup();
+ kfree(f->config);
+}
+
+static void ffs_function_enable(struct android_usb_function *f)
+{
+ struct android_dev *dev = _android_dev;
+ struct functionfs_config *config = f->config;
+
+ config->enabled = true;
+
+ /* Disable the gadget until the function is ready */
+ if (!config->opened)
+ android_disable(dev);
+}
+
+static void ffs_function_disable(struct android_usb_function *f)
+{
+ struct android_dev *dev = _android_dev;
+ struct functionfs_config *config = f->config;
+
+ config->enabled = false;
+
+ /* Balance the disable that was called in closed_callback */
+ if (!config->opened)
+ android_enable(dev);
+}
+
+static int ffs_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ struct functionfs_config *config = f->config;
+ return functionfs_bind_config(c->cdev, c, config->data);
+}
+
+static ssize_t
+ffs_aliases_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+ struct android_dev *dev = _android_dev;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+ ret = sprintf(buf, "%s\n", dev->ffs_aliases);
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static ssize_t
+ffs_aliases_store(struct device *pdev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct android_dev *dev = _android_dev;
+ char buff[256];
+
+ mutex_lock(&dev->mutex);
+
+ if (dev->enabled) {
+ mutex_unlock(&dev->mutex);
+ return -EBUSY;
+ }
+
+ strlcpy(buff, buf, sizeof(buff));
+ strlcpy(dev->ffs_aliases, strim(buff), sizeof(dev->ffs_aliases));
+
+ mutex_unlock(&dev->mutex);
+
+ return size;
+}
+
+static DEVICE_ATTR(aliases, S_IRUGO | S_IWUSR, ffs_aliases_show,
+ ffs_aliases_store);
+static struct device_attribute *ffs_function_attributes[] = {
+ &dev_attr_aliases,
+ NULL
+};
+
+static struct android_usb_function ffs_function = {
+ .name = "ffs",
+ .init = ffs_function_init,
+ .enable = ffs_function_enable,
+ .disable = ffs_function_disable,
+ .cleanup = ffs_function_cleanup,
+ .bind_config = ffs_function_bind_config,
+ .attributes = ffs_function_attributes,
+};
+
+static int functionfs_ready_callback(struct ffs_data *ffs)
+{
+ struct android_dev *dev = _android_dev;
+ struct functionfs_config *config = ffs_function.config;
+ int ret = 0;
+
+ mutex_lock(&dev->mutex);
+
+ ret = functionfs_bind(ffs, dev->cdev);
+ if (ret)
+ goto err;
+
+ config->data = ffs;
+ config->opened = true;
+
+ if (config->enabled)
+ android_enable(dev);
+
+err:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void functionfs_closed_callback(struct ffs_data *ffs)
+{
+ struct android_dev *dev = _android_dev;
+ struct functionfs_config *config = ffs_function.config;
+
+ mutex_lock(&dev->mutex);
+
+ if (config->enabled)
+ android_disable(dev);
+
+ config->opened = false;
+ config->data = NULL;
+
+ functionfs_unbind(ffs);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static void *functionfs_acquire_dev_callback(const char *dev_name)
+{
+	return NULL;
+}
+
+static void functionfs_release_dev_callback(struct ffs_data *ffs_data)
+{
+}
+
+#define MAX_ACM_INSTANCES 4
+struct acm_function_config {
+ int instances;
+ int instances_on;
+ struct usb_function *f_acm[MAX_ACM_INSTANCES];
+ struct usb_function_instance *f_acm_inst[MAX_ACM_INSTANCES];
+};
+
+static int
+acm_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ int i;
+ int ret;
+ struct acm_function_config *config;
+
+ config = kzalloc(sizeof(struct acm_function_config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+ f->config = config;
+
+ for (i = 0; i < MAX_ACM_INSTANCES; i++) {
+ config->f_acm_inst[i] = usb_get_function_instance("acm");
+ if (IS_ERR(config->f_acm_inst[i])) {
+ ret = PTR_ERR(config->f_acm_inst[i]);
+ goto err_usb_get_function_instance;
+ }
+ config->f_acm[i] = usb_get_function(config->f_acm_inst[i]);
+ if (IS_ERR(config->f_acm[i])) {
+ ret = PTR_ERR(config->f_acm[i]);
+ goto err_usb_get_function;
+ }
+ }
+ return 0;
+err_usb_get_function_instance:
+ while (i-- > 0) {
+ usb_put_function(config->f_acm[i]);
+err_usb_get_function:
+ usb_put_function_instance(config->f_acm_inst[i]);
+ }
+ return ret;
+}
+
+static void acm_function_cleanup(struct android_usb_function *f)
+{
+ int i;
+ struct acm_function_config *config = f->config;
+
+ for (i = 0; i < MAX_ACM_INSTANCES; i++) {
+ usb_put_function(config->f_acm[i]);
+ usb_put_function_instance(config->f_acm_inst[i]);
+ }
+ kfree(f->config);
+ f->config = NULL;
+}
+
+static int
+acm_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ int i;
+ int ret = 0;
+ struct acm_function_config *config = f->config;
+
+ config->instances_on = config->instances;
+ for (i = 0; i < config->instances_on; i++) {
+ ret = usb_add_function(c, config->f_acm[i]);
+ if (ret) {
+ pr_err("Could not bind acm%u config\n", i);
+ goto err_usb_add_function;
+ }
+ }
+
+ return 0;
+
+err_usb_add_function:
+ while (i-- > 0)
+ usb_remove_function(c, config->f_acm[i]);
+ return ret;
+}
+
+static void acm_function_unbind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ int i;
+ struct acm_function_config *config = f->config;
+
+ for (i = 0; i < config->instances_on; i++)
+ usb_remove_function(c, config->f_acm[i]);
+}
+
+static ssize_t acm_instances_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct acm_function_config *config = f->config;
+ return sprintf(buf, "%d\n", config->instances);
+}
+
+static ssize_t acm_instances_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct acm_function_config *config = f->config;
+ int value;
+
+	if (sscanf(buf, "%d", &value) != 1)
+		return -EINVAL;
+	if (value > MAX_ACM_INSTANCES)
+		value = MAX_ACM_INSTANCES;
+ config->instances = value;
+ return size;
+}
+
+static DEVICE_ATTR(instances, S_IRUGO | S_IWUSR, acm_instances_show,
+ acm_instances_store);
+static struct device_attribute *acm_function_attributes[] = {
+ &dev_attr_instances,
+ NULL
+};
+
+static struct android_usb_function acm_function = {
+ .name = "acm",
+ .init = acm_function_init,
+ .cleanup = acm_function_cleanup,
+ .bind_config = acm_function_bind_config,
+ .unbind_config = acm_function_unbind_config,
+ .attributes = acm_function_attributes,
+};
+
+
+static int
+mtp_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ return mtp_setup();
+}
+
+static void mtp_function_cleanup(struct android_usb_function *f)
+{
+ mtp_cleanup();
+}
+
+static int
+mtp_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ return mtp_bind_config(c, false);
+}
+
+static int
+ptp_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ /* nothing to do - initialization is handled by mtp_function_init */
+ return 0;
+}
+
+static void ptp_function_cleanup(struct android_usb_function *f)
+{
+ /* nothing to do - cleanup is handled by mtp_function_cleanup */
+}
+
+static int
+ptp_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ return mtp_bind_config(c, true);
+}
+
+static int mtp_function_ctrlrequest(struct android_usb_function *f,
+ struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *c)
+{
+ return mtp_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function mtp_function = {
+ .name = "mtp",
+ .init = mtp_function_init,
+ .cleanup = mtp_function_cleanup,
+ .bind_config = mtp_function_bind_config,
+ .ctrlrequest = mtp_function_ctrlrequest,
+};
+
+/* The PTP function is the same as MTP, with a slightly different interface descriptor */
+static struct android_usb_function ptp_function = {
+ .name = "ptp",
+ .init = ptp_function_init,
+ .cleanup = ptp_function_cleanup,
+ .bind_config = ptp_function_bind_config,
+};
+
+
+struct rndis_function_config {
+ u8 ethaddr[ETH_ALEN];
+ u32 vendorID;
+ char manufacturer[256];
+ /* "Wireless" RNDIS; auto-detected by Windows */
+ bool wceis;
+ struct eth_dev *dev;
+};
+
+static int
+rndis_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ f->config = kzalloc(sizeof(struct rndis_function_config), GFP_KERNEL);
+ if (!f->config)
+ return -ENOMEM;
+ return 0;
+}
+
+static void rndis_function_cleanup(struct android_usb_function *f)
+{
+ kfree(f->config);
+ f->config = NULL;
+}
+
+static int
+rndis_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ int ret;
+ struct eth_dev *dev;
+ struct rndis_function_config *rndis = f->config;
+
+	if (!rndis) {
+		pr_err("%s: rndis config is null\n", __func__);
+		return -EINVAL;
+	}
+
+ pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
+ rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+ rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+
+ dev = gether_setup_name(c->cdev->gadget, rndis->ethaddr, "rndis");
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ pr_err("%s: gether_setup failed\n", __func__);
+ return ret;
+ }
+ rndis->dev = dev;
+
+ if (rndis->wceis) {
+ /* "Wireless" RNDIS; auto-detected by Windows */
+ rndis_iad_descriptor.bFunctionClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ rndis_iad_descriptor.bFunctionSubClass = 0x01;
+ rndis_iad_descriptor.bFunctionProtocol = 0x03;
+ rndis_control_intf.bInterfaceClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ rndis_control_intf.bInterfaceSubClass = 0x01;
+ rndis_control_intf.bInterfaceProtocol = 0x03;
+ }
+
+ return rndis_bind_config_vendor(c, rndis->ethaddr, rndis->vendorID,
+ rndis->manufacturer, rndis->dev);
+}
+
+static void rndis_function_unbind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ struct rndis_function_config *rndis = f->config;
+ gether_cleanup(rndis->dev);
+}
+
+static ssize_t rndis_manufacturer_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ return sprintf(buf, "%s\n", config->manufacturer);
+}
+
+static ssize_t rndis_manufacturer_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+
+ if (size >= sizeof(config->manufacturer))
+ return -EINVAL;
+ if (sscanf(buf, "%s", config->manufacturer) == 1)
+ return size;
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(manufacturer, S_IRUGO | S_IWUSR, rndis_manufacturer_show,
+ rndis_manufacturer_store);
+
+static ssize_t rndis_wceis_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ return sprintf(buf, "%d\n", config->wceis);
+}
+
+static ssize_t rndis_wceis_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ int value;
+
+ if (sscanf(buf, "%d", &value) == 1) {
+ config->wceis = value;
+ return size;
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(wceis, S_IRUGO | S_IWUSR, rndis_wceis_show,
+ rndis_wceis_store);
+
+static ssize_t rndis_ethaddr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *rndis = f->config;
+ return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+ rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+}
+
+static ssize_t rndis_ethaddr_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *rndis = f->config;
+
+	/* %hhx stores a single byte, so the u8 array is not overrun */
+	if (sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
+		   &rndis->ethaddr[0], &rndis->ethaddr[1],
+		   &rndis->ethaddr[2], &rndis->ethaddr[3],
+		   &rndis->ethaddr[4], &rndis->ethaddr[5]) == 6)
+ return size;
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(ethaddr, S_IRUGO | S_IWUSR, rndis_ethaddr_show,
+ rndis_ethaddr_store);
+
+static ssize_t rndis_vendorID_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ return sprintf(buf, "%04x\n", config->vendorID);
+}
+
+static ssize_t rndis_vendorID_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ int value;
+
+ if (sscanf(buf, "%04x", &value) == 1) {
+ config->vendorID = value;
+ return size;
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(vendorID, S_IRUGO | S_IWUSR, rndis_vendorID_show,
+ rndis_vendorID_store);
+
+static struct device_attribute *rndis_function_attributes[] = {
+ &dev_attr_manufacturer,
+ &dev_attr_wceis,
+ &dev_attr_ethaddr,
+ &dev_attr_vendorID,
+ NULL
+};
+
+static struct android_usb_function rndis_function = {
+ .name = "rndis",
+ .init = rndis_function_init,
+ .cleanup = rndis_function_cleanup,
+ .bind_config = rndis_function_bind_config,
+ .unbind_config = rndis_function_unbind_config,
+ .attributes = rndis_function_attributes,
+};
+
+
+struct mass_storage_function_config {
+ struct fsg_config fsg;
+ struct fsg_common *common;
+};
+
+static int mass_storage_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ struct mass_storage_function_config *config;
+ struct fsg_common *common;
+ int err;
+
+ config = kzalloc(sizeof(struct mass_storage_function_config),
+ GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ config->fsg.nluns = 1;
+ config->fsg.luns[0].removable = 1;
+
+ common = fsg_common_init(NULL, cdev, &config->fsg);
+ if (IS_ERR(common)) {
+ kfree(config);
+ return PTR_ERR(common);
+ }
+
+ err = sysfs_create_link(&f->dev->kobj,
+ &common->luns[0].dev.kobj,
+ "lun");
+ if (err) {
+ kfree(config);
+ return err;
+ }
+
+ config->common = common;
+ f->config = config;
+ return 0;
+}
+
+static void mass_storage_function_cleanup(struct android_usb_function *f)
+{
+ kfree(f->config);
+ f->config = NULL;
+}
+
+static int mass_storage_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ struct mass_storage_function_config *config = f->config;
+ return fsg_bind_config(c->cdev, c, config->common);
+}
+
+static ssize_t mass_storage_inquiry_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct mass_storage_function_config *config = f->config;
+ return sprintf(buf, "%s\n", config->common->inquiry_string);
+}
+
+static ssize_t mass_storage_inquiry_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct mass_storage_function_config *config = f->config;
+ if (size >= sizeof(config->common->inquiry_string))
+ return -EINVAL;
+ if (sscanf(buf, "%s", config->common->inquiry_string) != 1)
+ return -EINVAL;
+ return size;
+}
+
+static DEVICE_ATTR(inquiry_string, S_IRUGO | S_IWUSR,
+ mass_storage_inquiry_show,
+ mass_storage_inquiry_store);
+
+static struct device_attribute *mass_storage_function_attributes[] = {
+ &dev_attr_inquiry_string,
+ NULL
+};
+
+static struct android_usb_function mass_storage_function = {
+ .name = "mass_storage",
+ .init = mass_storage_function_init,
+ .cleanup = mass_storage_function_cleanup,
+ .bind_config = mass_storage_function_bind_config,
+ .attributes = mass_storage_function_attributes,
+};
+
+
+static int accessory_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ return acc_setup();
+}
+
+static void accessory_function_cleanup(struct android_usb_function *f)
+{
+ acc_cleanup();
+}
+
+static int accessory_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ return acc_bind_config(c);
+}
+
+static int accessory_function_ctrlrequest(struct android_usb_function *f,
+ struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *c)
+{
+ return acc_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function accessory_function = {
+ .name = "accessory",
+ .init = accessory_function_init,
+ .cleanup = accessory_function_cleanup,
+ .bind_config = accessory_function_bind_config,
+ .ctrlrequest = accessory_function_ctrlrequest,
+};
+
+static int audio_source_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ struct audio_source_config *config;
+
+ config = kzalloc(sizeof(struct audio_source_config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+ config->card = -1;
+ config->device = -1;
+ f->config = config;
+ return 0;
+}
+
+static void audio_source_function_cleanup(struct android_usb_function *f)
+{
+ kfree(f->config);
+}
+
+static int audio_source_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ struct audio_source_config *config = f->config;
+
+ return audio_source_bind_config(c, config);
+}
+
+static void audio_source_function_unbind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ struct audio_source_config *config = f->config;
+
+ config->card = -1;
+ config->device = -1;
+}
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct audio_source_config *config = f->config;
+
+ /* print PCM card and device numbers */
+ return sprintf(buf, "%d %d\n", config->card, config->device);
+}
+
+static DEVICE_ATTR(pcm, S_IRUGO, audio_source_pcm_show, NULL);
+
+static struct device_attribute *audio_source_function_attributes[] = {
+ &dev_attr_pcm,
+ NULL
+};
+
+static struct android_usb_function audio_source_function = {
+ .name = "audio_source",
+ .init = audio_source_function_init,
+ .cleanup = audio_source_function_cleanup,
+ .bind_config = audio_source_function_bind_config,
+ .unbind_config = audio_source_function_unbind_config,
+ .attributes = audio_source_function_attributes,
+};
+
+static struct android_usb_function *supported_functions[] = {
+ &ffs_function,
+ &acm_function,
+ &mtp_function,
+ &ptp_function,
+ &rndis_function,
+ &mass_storage_function,
+ &accessory_function,
+ &audio_source_function,
+ NULL
+};
+
+
+static int android_init_functions(struct android_usb_function **functions,
+ struct usb_composite_dev *cdev)
+{
+ struct android_dev *dev = _android_dev;
+ struct android_usb_function *f;
+ struct device_attribute **attrs;
+ struct device_attribute *attr;
+	int err = 0;
+ int index = 0;
+
+ for (; (f = *functions++); index++) {
+ f->dev_name = kasprintf(GFP_KERNEL, "f_%s", f->name);
+ f->dev = device_create(android_class, dev->dev,
+ MKDEV(0, index), f, f->dev_name);
+ if (IS_ERR(f->dev)) {
+ pr_err("%s: Failed to create dev %s", __func__,
+ f->dev_name);
+ err = PTR_ERR(f->dev);
+ goto err_create;
+ }
+
+ if (f->init) {
+ err = f->init(f, cdev);
+ if (err) {
+ pr_err("%s: Failed to init %s", __func__,
+ f->name);
+ goto err_out;
+ }
+ }
+
+ attrs = f->attributes;
+ if (attrs) {
+ while ((attr = *attrs++) && !err)
+ err = device_create_file(f->dev, attr);
+ }
+ if (err) {
+ pr_err("%s: Failed to create function %s attributes",
+ __func__, f->name);
+ goto err_out;
+ }
+ }
+ return 0;
+
+err_out:
+ device_destroy(android_class, f->dev->devt);
+err_create:
+ kfree(f->dev_name);
+ return err;
+}
+
+static void android_cleanup_functions(struct android_usb_function **functions)
+{
+ struct android_usb_function *f;
+
+ while (*functions) {
+ f = *functions++;
+
+ if (f->dev) {
+ device_destroy(android_class, f->dev->devt);
+ kfree(f->dev_name);
+ }
+
+ if (f->cleanup)
+ f->cleanup(f);
+ }
+}
+
+static int
+android_bind_enabled_functions(struct android_dev *dev,
+ struct usb_configuration *c)
+{
+ struct android_usb_function *f;
+ int ret;
+
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+ ret = f->bind_config(f, c);
+ if (ret) {
+ pr_err("%s: %s failed", __func__, f->name);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void
+android_unbind_enabled_functions(struct android_dev *dev,
+ struct usb_configuration *c)
+{
+ struct android_usb_function *f;
+
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+ if (f->unbind_config)
+ f->unbind_config(f, c);
+ }
+}
+
+static int android_enable_function(struct android_dev *dev, char *name)
+{
+ struct android_usb_function **functions = dev->functions;
+ struct android_usb_function *f;
+ while ((f = *functions++)) {
+ if (!strcmp(name, f->name)) {
+ list_add_tail(&f->enabled_list,
+ &dev->enabled_functions);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+/*-------------------------------------------------------------------------*/
+/* /sys/class/android_usb/android%d/ interface */
+
+static ssize_t
+functions_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ struct android_usb_function *f;
+ char *buff = buf;
+
+ mutex_lock(&dev->mutex);
+
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list)
+ buff += sprintf(buff, "%s,", f->name);
+
+ mutex_unlock(&dev->mutex);
+
+ if (buff != buf)
+ *(buff-1) = '\n';
+ return buff - buf;
+}
+
+static ssize_t
+functions_store(struct device *pdev, struct device_attribute *attr,
+ const char *buff, size_t size)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ char *name;
+ char buf[256], *b;
+ char aliases[256], *a;
+ int err;
+ int is_ffs;
+ int ffs_enabled = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (dev->enabled) {
+ mutex_unlock(&dev->mutex);
+ return -EBUSY;
+ }
+
+ INIT_LIST_HEAD(&dev->enabled_functions);
+
+ strlcpy(buf, buff, sizeof(buf));
+ b = strim(buf);
+
+ while (b) {
+ name = strsep(&b, ",");
+ if (!name)
+ continue;
+
+ is_ffs = 0;
+ strlcpy(aliases, dev->ffs_aliases, sizeof(aliases));
+ a = aliases;
+
+ while (a) {
+ char *alias = strsep(&a, ",");
+ if (alias && !strcmp(name, alias)) {
+ is_ffs = 1;
+ break;
+ }
+ }
+
+ if (is_ffs) {
+ if (ffs_enabled)
+ continue;
+ err = android_enable_function(dev, "ffs");
+ if (err)
+ pr_err("android_usb: Cannot enable ffs (%d)",
+ err);
+ else
+ ffs_enabled = 1;
+ continue;
+ }
+
+ err = android_enable_function(dev, name);
+ if (err)
+ pr_err("android_usb: Cannot enable '%s' (%d)",
+ name, err);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return size;
+}
+
+static ssize_t enable_show(struct device *pdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ return sprintf(buf, "%d\n", dev->enabled);
+}
+
+static ssize_t enable_store(struct device *pdev, struct device_attribute *attr,
+ const char *buff, size_t size)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct android_usb_function *f;
+ int enabled = 0;
+
+
+ if (!cdev)
+ return -ENODEV;
+
+ mutex_lock(&dev->mutex);
+
+ sscanf(buff, "%d", &enabled);
+ if (enabled && !dev->enabled) {
+ /*
+ * Update values in composite driver's copy of
+ * device descriptor.
+ */
+ cdev->desc.idVendor = device_desc.idVendor;
+ cdev->desc.idProduct = device_desc.idProduct;
+ cdev->desc.bcdDevice = device_desc.bcdDevice;
+ cdev->desc.bDeviceClass = device_desc.bDeviceClass;
+ cdev->desc.bDeviceSubClass = device_desc.bDeviceSubClass;
+ cdev->desc.bDeviceProtocol = device_desc.bDeviceProtocol;
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+ if (f->enable)
+ f->enable(f);
+ }
+ android_enable(dev);
+ dev->enabled = true;
+ } else if (!enabled && dev->enabled) {
+ android_disable(dev);
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+ if (f->disable)
+ f->disable(f);
+ }
+ dev->enabled = false;
+ } else {
+ pr_err("android_usb: already %s\n",
+ dev->enabled ? "enabled" : "disabled");
+ }
+
+ mutex_unlock(&dev->mutex);
+ return size;
+}
+
+static ssize_t state_show(struct device *pdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+ struct usb_composite_dev *cdev = dev->cdev;
+ char *state = "DISCONNECTED";
+ unsigned long flags;
+
+ if (!cdev)
+ goto out;
+
+ spin_lock_irqsave(&cdev->lock, flags);
+ if (cdev->config)
+ state = "CONFIGURED";
+ else if (dev->connected)
+ state = "CONNECTED";
+ spin_unlock_irqrestore(&cdev->lock, flags);
+out:
+ return sprintf(buf, "%s\n", state);
+}
+
+#define DESCRIPTOR_ATTR(field, format_string) \
+static ssize_t \
+field ## _show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sprintf(buf, format_string, device_desc.field); \
+} \
+static ssize_t \
+field ## _store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ int value; \
+ if (sscanf(buf, format_string, &value) == 1) { \
+ device_desc.field = value; \
+ return size; \
+ } \
+	return -EINVAL; \
+} \
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+#define DESCRIPTOR_STRING_ATTR(field, buffer) \
+static ssize_t \
+field ## _show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sprintf(buf, "%s", buffer); \
+} \
+static ssize_t \
+field ## _store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ if (size >= sizeof(buffer)) \
+ return -EINVAL; \
+ return strlcpy(buffer, buf, sizeof(buffer)); \
+} \
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+
+DESCRIPTOR_ATTR(idVendor, "%04x\n")
+DESCRIPTOR_ATTR(idProduct, "%04x\n")
+DESCRIPTOR_ATTR(bcdDevice, "%04x\n")
+DESCRIPTOR_ATTR(bDeviceClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceSubClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceProtocol, "%d\n")
+DESCRIPTOR_STRING_ATTR(iManufacturer, manufacturer_string)
+DESCRIPTOR_STRING_ATTR(iProduct, product_string)
+DESCRIPTOR_STRING_ATTR(iSerial, serial_string)
+
+static DEVICE_ATTR(functions, S_IRUGO | S_IWUSR, functions_show,
+ functions_store);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+
+static struct device_attribute *android_usb_attributes[] = {
+ &dev_attr_idVendor,
+ &dev_attr_idProduct,
+ &dev_attr_bcdDevice,
+ &dev_attr_bDeviceClass,
+ &dev_attr_bDeviceSubClass,
+ &dev_attr_bDeviceProtocol,
+ &dev_attr_iManufacturer,
+ &dev_attr_iProduct,
+ &dev_attr_iSerial,
+ &dev_attr_functions,
+ &dev_attr_enable,
+ &dev_attr_state,
+ NULL
+};
+
+/*-------------------------------------------------------------------------*/
+/* Composite driver */
+
+static int android_bind_config(struct usb_configuration *c)
+{
+ struct android_dev *dev = _android_dev;
+ int ret = 0;
+
+ ret = android_bind_enabled_functions(dev, c);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void android_unbind_config(struct usb_configuration *c)
+{
+ struct android_dev *dev = _android_dev;
+
+ android_unbind_enabled_functions(dev, c);
+}
+
+static int android_bind(struct usb_composite_dev *cdev)
+{
+ struct android_dev *dev = _android_dev;
+ struct usb_gadget *gadget = cdev->gadget;
+ int id, ret;
+
+ /*
+ * Start disconnected. Userspace will connect the gadget once
+ * it is done configuring the functions.
+ */
+ usb_gadget_disconnect(gadget);
+
+ ret = android_init_functions(dev->functions, cdev);
+ if (ret)
+ return ret;
+
+ /* Allocate string descriptor numbers ... note that string
+ * contents can be overridden by the composite_dev glue.
+ */
+ id = usb_string_id(cdev);
+ if (id < 0)
+ return id;
+ strings_dev[STRING_MANUFACTURER_IDX].id = id;
+ device_desc.iManufacturer = id;
+
+ id = usb_string_id(cdev);
+ if (id < 0)
+ return id;
+ strings_dev[STRING_PRODUCT_IDX].id = id;
+ device_desc.iProduct = id;
+
+ /* Default strings - should be updated by userspace */
+ strncpy(manufacturer_string, "Android", sizeof(manufacturer_string)-1);
+ strncpy(product_string, "Android", sizeof(product_string) - 1);
+ strncpy(serial_string, "0123456789ABCDEF", sizeof(serial_string) - 1);
+
+ id = usb_string_id(cdev);
+ if (id < 0)
+ return id;
+ strings_dev[STRING_SERIAL_IDX].id = id;
+ device_desc.iSerialNumber = id;
+
+ usb_gadget_set_selfpowered(gadget);
+ dev->cdev = cdev;
+
+ return 0;
+}
+
+static int android_usb_unbind(struct usb_composite_dev *cdev)
+{
+ struct android_dev *dev = _android_dev;
+
+ cancel_work_sync(&dev->work);
+ android_cleanup_functions(dev->functions);
+ return 0;
+}
+
+/* HACK: android needs to override setup for accessory to work */
+static int (*composite_setup_func)(struct usb_gadget *gadget,
+				   const struct usb_ctrlrequest *c);
+
+static int
+android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c)
+{
+ struct android_dev *dev = _android_dev;
+ struct usb_composite_dev *cdev = get_gadget_data(gadget);
+ struct usb_request *req = cdev->req;
+ struct android_usb_function *f;
+ int value = -EOPNOTSUPP;
+ unsigned long flags;
+
+ req->zero = 0;
+ req->length = 0;
+ gadget->ep0->driver_data = cdev;
+
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+ if (f->ctrlrequest) {
+ value = f->ctrlrequest(f, cdev, c);
+ if (value >= 0)
+ break;
+ }
+ }
+
+ /* Special case the accessory function.
+ * It needs to handle control requests before it is enabled.
+ */
+ if (value < 0)
+ value = acc_ctrlrequest(cdev, c);
+
+ if (value < 0)
+ value = composite_setup_func(gadget, c);
+
+ spin_lock_irqsave(&cdev->lock, flags);
+ if (!dev->connected) {
+ dev->connected = 1;
+ schedule_work(&dev->work);
+ } else if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
+ cdev->config) {
+ schedule_work(&dev->work);
+ }
+ spin_unlock_irqrestore(&cdev->lock, flags);
+
+ return value;
+}
+
+static void android_disconnect(struct usb_composite_dev *cdev)
+{
+ struct android_dev *dev = _android_dev;
+
+	/*
+	 * Accessory HID support can be active while the accessory
+	 * function is not actually enabled, so we need to inform it
+	 * when we are disconnected.
+	 */
+ acc_disconnect();
+
+ dev->connected = 0;
+ schedule_work(&dev->work);
+}
+
+static struct usb_composite_driver android_usb_driver = {
+ .name = "android_usb",
+ .dev = &device_desc,
+ .strings = dev_strings,
+ .bind = android_bind,
+ .unbind = android_usb_unbind,
+ .disconnect = android_disconnect,
+ .max_speed = USB_SPEED_HIGH,
+};
+
+static int android_create_device(struct android_dev *dev)
+{
+ struct device_attribute **attrs = android_usb_attributes;
+ struct device_attribute *attr;
+ int err;
+
+ dev->dev = device_create(android_class, NULL,
+ MKDEV(0, 0), NULL, "android0");
+ if (IS_ERR(dev->dev))
+ return PTR_ERR(dev->dev);
+
+ dev_set_drvdata(dev->dev, dev);
+
+ while ((attr = *attrs++)) {
+ err = device_create_file(dev->dev, attr);
+ if (err) {
+ device_destroy(android_class, dev->dev->devt);
+ return err;
+ }
+ }
+ return 0;
+}
+
+
+static int __init init(void)
+{
+ struct android_dev *dev;
+ int err;
+
+ android_class = class_create(THIS_MODULE, "android_usb");
+ if (IS_ERR(android_class))
+ return PTR_ERR(android_class);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_dev;
+ }
+
+ dev->disable_depth = 1;
+ dev->functions = supported_functions;
+ INIT_LIST_HEAD(&dev->enabled_functions);
+ INIT_WORK(&dev->work, android_work);
+ mutex_init(&dev->mutex);
+
+ err = android_create_device(dev);
+ if (err) {
+ pr_err("%s: failed to create android device %d", __func__, err);
+ goto err_create;
+ }
+
+ _android_dev = dev;
+
+ err = usb_composite_probe(&android_usb_driver);
+ if (err) {
+ pr_err("%s: failed to probe driver %d", __func__, err);
+ goto err_probe;
+ }
+
+ /* HACK: exchange composite's setup with ours */
+ composite_setup_func = android_usb_driver.gadget_driver.setup;
+ android_usb_driver.gadget_driver.setup = android_setup;
+
+ return 0;
+
+err_probe:
+ device_destroy(android_class, dev->dev->devt);
+err_create:
+ kfree(dev);
+err_dev:
+ class_destroy(android_class);
+ return err;
+}
+late_initcall(init);
+
+static void __exit cleanup(void)
+{
+ usb_composite_unregister(&android_usb_driver);
+ class_destroy(android_class);
+ kfree(_android_dev);
+ _android_dev = NULL;
+}
+module_exit(cleanup);
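
Userspace drives this gadget entirely through the android0 sysfs attributes created above: disable, set descriptors, pick the function list, re-enable. A sketch of that sequence in C (the idProduct value and function list are examples; "adb" resolves through the ffs_aliases mechanism in functions_store()):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /* Write a string to one of the android0 sysfs attributes. */
    static int sysfs_write(const char *path, const char *val)
    {
        ssize_t n;
        int fd = open(path, O_WRONLY);

        if (fd < 0)
            return -1;
        n = write(fd, val, strlen(val));
        close(fd);
        return n < 0 ? -1 : 0;
    }

    int main(void)
    {
        sysfs_write("/sys/class/android_usb/android0/enable", "0");
        sysfs_write("/sys/class/android_usb/android0/idVendor", "18d1");
        sysfs_write("/sys/class/android_usb/android0/idProduct", "4e26");
        sysfs_write("/sys/class/android_usb/android0/functions", "mtp,adb");
        return sysfs_write("/sys/class/android_usb/android0/enable", "1");
    }
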
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 44a292b7501..2dd57853de6 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -812,7 +812,7 @@ done:
}
EXPORT_SYMBOL_GPL(usb_add_config);
-static void remove_config(struct usb_composite_dev *cdev,
+static void unbind_config(struct usb_composite_dev *cdev,
struct usb_configuration *config)
{
while (!list_empty(&config->functions)) {
@@ -827,7 +827,6 @@ static void remove_config(struct usb_composite_dev *cdev,
/* may free memory for "f" */
}
}
- list_del(&config->list);
if (config->unbind) {
DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
config->unbind(config);
@@ -854,9 +853,11 @@ void usb_remove_config(struct usb_composite_dev *cdev,
if (cdev->config == config)
reset_config(cdev);
+ list_del(&config->list);
+
spin_unlock_irqrestore(&cdev->lock, flags);
- remove_config(cdev, config);
+ unbind_config(cdev, config);
}
/*-------------------------------------------------------------------------*/
@@ -1525,7 +1526,8 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
struct usb_configuration *c;
c = list_first_entry(&cdev->configs,
struct usb_configuration, list);
- remove_config(cdev, c);
+ list_del(&c->list);
+ unbind_config(cdev, c);
}
if (cdev->driver->unbind && unbind_driver)
cdev->driver->unbind(cdev);
diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c
new file mode 100644
index 00000000000..092964c2b50
--- /dev/null
+++ b/drivers/usb/gadget/f_accessory.c
@@ -0,0 +1,1180 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/hid.h>
+#include <linux/hiddev.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_accessory.h>
+
+#define BULK_BUFFER_SIZE 16384
+#define ACC_STRING_SIZE 256
+
+#define PROTOCOL_VERSION 2
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX 0
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+
+struct acc_hid_dev {
+ struct list_head list;
+ struct hid_device *hid;
+ struct acc_dev *dev;
+ /* accessory defined ID */
+ int id;
+ /* HID report descriptor */
+ u8 *report_desc;
+ /* length of HID report descriptor */
+ int report_desc_len;
+ /* number of bytes of report_desc we have received so far */
+ int report_desc_offset;
+};
+
+struct acc_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+ spinlock_t lock;
+
+ struct usb_ep *ep_in;
+ struct usb_ep *ep_out;
+
+ /* set to 1 when we connect */
+ int online:1;
+ /* Set to 1 when we disconnect.
+ * Not cleared until our file is closed.
+ */
+ int disconnected:1;
+
+ /* strings sent by the host */
+ char manufacturer[ACC_STRING_SIZE];
+ char model[ACC_STRING_SIZE];
+ char description[ACC_STRING_SIZE];
+ char version[ACC_STRING_SIZE];
+ char uri[ACC_STRING_SIZE];
+ char serial[ACC_STRING_SIZE];
+
+ /* for acc_complete_set_string */
+ int string_index;
+
+ /* set to 1 if we have a pending start request */
+ int start_requested;
+
+ int audio_mode;
+
+ /* synchronize access to our device file */
+ atomic_t open_excl;
+
+ struct list_head tx_idle;
+
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ struct usb_request *rx_req[RX_REQ_MAX];
+ int rx_done;
+
+ /* delayed work for handling ACCESSORY_START */
+ struct delayed_work start_work;
+
+ /* worker for registering and unregistering hid devices */
+ struct work_struct hid_work;
+
+ /* list of active HID devices */
+ struct list_head hid_list;
+
+ /* list of new HID devices to register */
+ struct list_head new_hid_list;
+
+ /* list of dead HID devices to unregister */
+ struct list_head dead_hid_list;
+};
+
+static struct usb_interface_descriptor acc_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = 0,
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_acc_descs[] = {
+ (struct usb_descriptor_header *) &acc_interface_desc,
+ (struct usb_descriptor_header *) &acc_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &acc_fullspeed_out_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *hs_acc_descs[] = {
+ (struct usb_descriptor_header *) &acc_interface_desc,
+ (struct usb_descriptor_header *) &acc_highspeed_in_desc,
+ (struct usb_descriptor_header *) &acc_highspeed_out_desc,
+ NULL,
+};
+
+static struct usb_string acc_string_defs[] = {
+ [INTERFACE_STRING_INDEX].s = "Android Accessory Interface",
+ { }, /* end of list */
+};
+
+static struct usb_gadget_strings acc_string_table = {
+ .language = 0x0409, /* en-US */
+ .strings = acc_string_defs,
+};
+
+static struct usb_gadget_strings *acc_strings[] = {
+ &acc_string_table,
+ NULL,
+};
+
+/* global device pointer, set in acc_setup() and shared with the file
+ * operations and control request handler
+ */
+static struct acc_dev *_acc_dev;
+
+static inline struct acc_dev *func_to_dev(struct usb_function *f)
+{
+ return container_of(f, struct acc_dev, function);
+}
+
+static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ /* now allocate buffers for the requests */
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+
+ return req;
+}
+
+static void acc_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+/* add a request to the tail of a list */
+static void req_put(struct acc_dev *dev, struct list_head *head,
+ struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&req->list, head);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(head)) {
+ req = 0;
+ } else {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return req;
+}
+
+static void acc_set_disconnected(struct acc_dev *dev)
+{
+ dev->online = 0;
+ dev->disconnected = 1;
+}
+
+static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+ struct acc_dev *dev = _acc_dev;
+
+ if (req->status != 0)
+ acc_set_disconnected(dev);
+
+ req_put(dev, &dev->tx_idle, req);
+
+ wake_up(&dev->write_wq);
+}
+
+static void acc_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+ struct acc_dev *dev = _acc_dev;
+
+ dev->rx_done = 1;
+ if (req->status != 0)
+ acc_set_disconnected(dev);
+
+ wake_up(&dev->read_wq);
+}
+
+static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
+{
+ struct acc_dev *dev = ep->driver_data;
+ char *string_dest = NULL;
+ int length = req->actual;
+
+ if (req->status != 0) {
+ pr_err("acc_complete_set_string, err %d\n", req->status);
+ return;
+ }
+
+ switch (dev->string_index) {
+ case ACCESSORY_STRING_MANUFACTURER:
+ string_dest = dev->manufacturer;
+ break;
+ case ACCESSORY_STRING_MODEL:
+ string_dest = dev->model;
+ break;
+ case ACCESSORY_STRING_DESCRIPTION:
+ string_dest = dev->description;
+ break;
+ case ACCESSORY_STRING_VERSION:
+ string_dest = dev->version;
+ break;
+ case ACCESSORY_STRING_URI:
+ string_dest = dev->uri;
+ break;
+ case ACCESSORY_STRING_SERIAL:
+ string_dest = dev->serial;
+ break;
+ }
+ if (string_dest) {
+ unsigned long flags;
+
+ if (length >= ACC_STRING_SIZE)
+ length = ACC_STRING_SIZE - 1;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ memcpy(string_dest, req->buf, length);
+ /* ensure zero termination */
+ string_dest[length] = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ } else {
+ pr_err("unknown accessory string index %d\n",
+ dev->string_index);
+ }
+}
+
+static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct acc_hid_dev *hid = req->context;
+ struct acc_dev *dev = hid->dev;
+ int length = req->actual;
+
+ if (req->status != 0) {
+ pr_err("acc_complete_set_hid_report_desc, err %d\n",
+ req->status);
+ return;
+ }
+
+ memcpy(hid->report_desc + hid->report_desc_offset, req->buf, length);
+ hid->report_desc_offset += length;
+ if (hid->report_desc_offset == hid->report_desc_len) {
+ /* After we have received the entire report descriptor
+ * we schedule work to initialize the HID device
+ */
+ schedule_work(&dev->hid_work);
+ }
+}
+
+static void acc_complete_send_hid_event(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct acc_hid_dev *hid = req->context;
+ int length = req->actual;
+
+ if (req->status != 0) {
+ pr_err("acc_complete_send_hid_event, err %d\n", req->status);
+ return;
+ }
+
+ hid_report_raw_event(hid->hid, HID_INPUT_REPORT, req->buf, length, 1);
+}
+
+static int acc_hid_parse(struct hid_device *hid)
+{
+ struct acc_hid_dev *hdev = hid->driver_data;
+
+ hid_parse_report(hid, hdev->report_desc, hdev->report_desc_len);
+ return 0;
+}
+
+static int acc_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void acc_hid_stop(struct hid_device *hid)
+{
+}
+
+static int acc_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void acc_hid_close(struct hid_device *hid)
+{
+}
+
+static struct hid_ll_driver acc_hid_ll_driver = {
+ .parse = acc_hid_parse,
+ .start = acc_hid_start,
+ .stop = acc_hid_stop,
+ .open = acc_hid_open,
+ .close = acc_hid_close,
+};
+
+static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev,
+ int id, int desc_len)
+{
+ struct acc_hid_dev *hdev;
+
+ hdev = kzalloc(sizeof(*hdev), GFP_ATOMIC);
+ if (!hdev)
+ return NULL;
+ hdev->report_desc = kzalloc(desc_len, GFP_ATOMIC);
+ if (!hdev->report_desc) {
+ kfree(hdev);
+ return NULL;
+ }
+ hdev->dev = dev;
+ hdev->id = id;
+ hdev->report_desc_len = desc_len;
+
+ return hdev;
+}
+
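+/* find a HID device by its accessory-assigned ID; caller must hold dev->lock */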
+static struct acc_hid_dev *acc_hid_get(struct list_head *list, int id)
+{
+ struct acc_hid_dev *hid;
+
+ list_for_each_entry(hid, list, list) {
+ if (hid->id == id)
+ return hid;
+ }
+ return NULL;
+}
+
+static int acc_register_hid(struct acc_dev *dev, int id, int desc_length)
+{
+ struct acc_hid_dev *hid;
+ unsigned long flags;
+
+ /* report descriptor length must be > 0 */
+ if (desc_length <= 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ /* replace HID if one already exists with this ID */
+ hid = acc_hid_get(&dev->hid_list, id);
+ if (!hid)
+ hid = acc_hid_get(&dev->new_hid_list, id);
+ if (hid)
+ list_move(&hid->list, &dev->dead_hid_list);
+
+ hid = acc_hid_new(dev, id, desc_length);
+ if (!hid) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return -ENOMEM;
+ }
+
+ list_add(&hid->list, &dev->new_hid_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* schedule work to register the HID device */
+ schedule_work(&dev->hid_work);
+ return 0;
+}
+
+static int acc_unregister_hid(struct acc_dev *dev, int id)
+{
+ struct acc_hid_dev *hid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ hid = acc_hid_get(&dev->hid_list, id);
+ if (!hid)
+ hid = acc_hid_get(&dev->new_hid_list, id);
+ if (!hid) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return -EINVAL;
+ }
+
+ list_move(&hid->list, &dev->dead_hid_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ schedule_work(&dev->hid_work);
+ return 0;
+}
+
+static int create_bulk_endpoints(struct acc_dev *dev,
+ struct usb_endpoint_descriptor *in_desc,
+ struct usb_endpoint_descriptor *out_desc)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ int i;
+
+ DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+ ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_in = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_out = ep;
+
+ /* now allocate requests for our endpoints */
+ for (i = 0; i < TX_REQ_MAX; i++) {
+ req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = acc_complete_in;
+ req_put(dev, &dev->tx_idle, req);
+ }
+ for (i = 0; i < RX_REQ_MAX; i++) {
+ req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = acc_complete_out;
+ dev->rx_req[i] = req;
+ }
+
+ return 0;
+
+fail:
+ pr_err("acc_bind() could not allocate requests\n");
+ while ((req = req_get(dev, &dev->tx_idle)))
+ acc_request_free(req, dev->ep_in);
+ for (i = 0; i < RX_REQ_MAX; i++)
+ acc_request_free(dev->rx_req[i], dev->ep_out);
+ return -1;
+}
+
+static ssize_t acc_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct acc_dev *dev = fp->private_data;
+ struct usb_request *req;
+ int r = count, xfer;
+ int ret = 0;
+
+ pr_debug("acc_read(%d)\n", count);
+
+ if (dev->disconnected)
+ return -ENODEV;
+
+ if (count > BULK_BUFFER_SIZE)
+ count = BULK_BUFFER_SIZE;
+
+ /* we will block until we're online */
+ pr_debug("acc_read: waiting for online\n");
+ ret = wait_event_interruptible(dev->read_wq, dev->online);
+ if (ret < 0) {
+ r = ret;
+ goto done;
+ }
+
+requeue_req:
+ /* queue a request */
+ req = dev->rx_req[0];
+ req->length = count;
+ dev->rx_done = 0;
+ ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+ if (ret < 0) {
+ r = -EIO;
+ goto done;
+ } else {
+ pr_debug("rx %p queue\n", req);
+ }
+
+ /* wait for a request to complete */
+ ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+ if (ret < 0) {
+ r = ret;
+ usb_ep_dequeue(dev->ep_out, req);
+ goto done;
+ }
+ if (dev->online) {
+ /* If we got a 0-len packet, throw it back and try again. */
+ if (req->actual == 0)
+ goto requeue_req;
+
+ pr_debug("rx %p %d\n", req, req->actual);
+ xfer = (req->actual < count) ? req->actual : count;
+ r = xfer;
+ if (copy_to_user(buf, req->buf, xfer))
+ r = -EFAULT;
+ } else
+ r = -EIO;
+
+done:
+ pr_debug("acc_read returning %d\n", r);
+ return r;
+}
+
+static ssize_t acc_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct acc_dev *dev = fp->private_data;
+ struct usb_request *req = 0;
+ int r = count, xfer;
+ int ret;
+
+ pr_debug("acc_write(%d)\n", count);
+
+ if (!dev->online || dev->disconnected)
+ return -ENODEV;
+
+ while (count > 0) {
+ if (!dev->online) {
+ pr_debug("acc_write dev->error\n");
+ r = -EIO;
+ break;
+ }
+
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ ((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+ if (!req) {
+ r = ret;
+ break;
+ }
+
+ if (count > BULK_BUFFER_SIZE)
+ xfer = BULK_BUFFER_SIZE;
+ else
+ xfer = count;
+ if (copy_from_user(req->buf, buf, xfer)) {
+ r = -EFAULT;
+ break;
+ }
+
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+ if (ret < 0) {
+ pr_debug("acc_write: xfer error %d\n", ret);
+ r = -EIO;
+ break;
+ }
+
+ buf += xfer;
+ count -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = 0;
+ }
+
+ if (req)
+ req_put(dev, &dev->tx_idle, req);
+
+ pr_debug("acc_write returning %d\n", r);
+ return r;
+}
+
+static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+ struct acc_dev *dev = fp->private_data;
+ char *src = NULL;
+ int ret;
+
+ switch (code) {
+ case ACCESSORY_GET_STRING_MANUFACTURER:
+ src = dev->manufacturer;
+ break;
+ case ACCESSORY_GET_STRING_MODEL:
+ src = dev->model;
+ break;
+ case ACCESSORY_GET_STRING_DESCRIPTION:
+ src = dev->description;
+ break;
+ case ACCESSORY_GET_STRING_VERSION:
+ src = dev->version;
+ break;
+ case ACCESSORY_GET_STRING_URI:
+ src = dev->uri;
+ break;
+ case ACCESSORY_GET_STRING_SERIAL:
+ src = dev->serial;
+ break;
+ case ACCESSORY_IS_START_REQUESTED:
+ return dev->start_requested;
+ case ACCESSORY_GET_AUDIO_MODE:
+ return dev->audio_mode;
+ }
+ if (!src)
+ return -EINVAL;
+
+ ret = strlen(src) + 1;
+ if (copy_to_user((void __user *)value, src, ret))
+ ret = -EFAULT;
+ return ret;
+}
+
+static int acc_open(struct inode *ip, struct file *fp)
+{
+ printk(KERN_INFO "acc_open\n");
+ if (atomic_xchg(&_acc_dev->open_excl, 1))
+ return -EBUSY;
+
+ _acc_dev->disconnected = 0;
+ fp->private_data = _acc_dev;
+ return 0;
+}
+
+static int acc_release(struct inode *ip, struct file *fp)
+{
+ printk(KERN_INFO "acc_release\n");
+
+ WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0));
+ _acc_dev->disconnected = 0;
+ return 0;
+}
+
+/* file operations for /dev/usb_accessory */
+static const struct file_operations acc_fops = {
+ .owner = THIS_MODULE,
+ .read = acc_read,
+ .write = acc_write,
+ .unlocked_ioctl = acc_ioctl,
+ .open = acc_open,
+ .release = acc_release,
+};
+
+static int acc_hid_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
+static struct miscdevice acc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "usb_accessory",
+ .fops = &acc_fops,
+};
+
+static const struct hid_device_id acc_hid_table[] = {
+ { HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+ { }
+};
+
+static struct hid_driver acc_hid_driver = {
+ .name = "USB accessory",
+ .id_table = acc_hid_table,
+ .probe = acc_hid_probe,
+};
+
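+/*
+ * Handle Android Open Accessory vendor requests on ep0. A typical
+ * handshake, as driven by the host, is:
+ *   ACCESSORY_GET_PROTOCOL (IN)  - read PROTOCOL_VERSION
+ *   ACCESSORY_SEND_STRING  (OUT) - once per accessory string ID
+ *   ACCESSORY_START        (OUT) - schedules start_work, which sends
+ *                                  an ACCESSORY=START uevent
+ */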
+static int acc_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct acc_dev *dev = _acc_dev;
+ int value = -EOPNOTSUPP;
+ struct acc_hid_dev *hid;
+ int offset;
+ u8 b_requestType = ctrl->bRequestType;
+ u8 b_request = ctrl->bRequest;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ unsigned long flags;
+
+/*
+ printk(KERN_INFO "acc_ctrlrequest "
+ "%02x.%02x v%04x i%04x l%u\n",
+ b_requestType, b_request,
+ w_value, w_index, w_length);
+*/
+
+ if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {
+ if (b_request == ACCESSORY_START) {
+ dev->start_requested = 1;
+ schedule_delayed_work(
+ &dev->start_work, msecs_to_jiffies(10));
+ value = 0;
+ } else if (b_request == ACCESSORY_SEND_STRING) {
+ dev->string_index = w_index;
+ cdev->gadget->ep0->driver_data = dev;
+ cdev->req->complete = acc_complete_set_string;
+ value = w_length;
+ } else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
+ w_index == 0 && w_length == 0) {
+ dev->audio_mode = w_value;
+ value = 0;
+ } else if (b_request == ACCESSORY_REGISTER_HID) {
+ value = acc_register_hid(dev, w_value, w_index);
+ } else if (b_request == ACCESSORY_UNREGISTER_HID) {
+ value = acc_unregister_hid(dev, w_value);
+ } else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
+ spin_lock_irqsave(&dev->lock, flags);
+ hid = acc_hid_get(&dev->new_hid_list, w_value);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (!hid) {
+ value = -EINVAL;
+ goto err;
+ }
+ offset = w_index;
+ if (offset != hid->report_desc_offset
+ || offset + w_length > hid->report_desc_len) {
+ value = -EINVAL;
+ goto err;
+ }
+ cdev->req->context = hid;
+ cdev->req->complete = acc_complete_set_hid_report_desc;
+ value = w_length;
+ } else if (b_request == ACCESSORY_SEND_HID_EVENT) {
+ spin_lock_irqsave(&dev->lock, flags);
+ hid = acc_hid_get(&dev->hid_list, w_value);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (!hid) {
+ value = -EINVAL;
+ goto err;
+ }
+ cdev->req->context = hid;
+ cdev->req->complete = acc_complete_send_hid_event;
+ value = w_length;
+ }
+ } else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
+ if (b_request == ACCESSORY_GET_PROTOCOL) {
+ *((__le16 *)cdev->req->buf) = cpu_to_le16(PROTOCOL_VERSION);
+ value = sizeof(u16);
+
+ /* clear any string left over from a previous session */
+ memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
+ memset(dev->model, 0, sizeof(dev->model));
+ memset(dev->description, 0, sizeof(dev->description));
+ memset(dev->version, 0, sizeof(dev->version));
+ memset(dev->uri, 0, sizeof(dev->uri));
+ memset(dev->serial, 0, sizeof(dev->serial));
+ dev->start_requested = 0;
+ dev->audio_mode = 0;
+ }
+ }
+
+ if (value >= 0) {
+ cdev->req->zero = 0;
+ cdev->req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+ if (value < 0)
+ ERROR(cdev, "%s setup response queue error\n",
+ __func__);
+ }
+
+err:
+ if (value == -EOPNOTSUPP)
+ VDBG(cdev,
+ "unknown class-specific control req "
+ "%02x.%02x v%04x i%04x l%u\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ return value;
+}
+
+static int
+acc_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct acc_dev *dev = func_to_dev(f);
+ int id;
+ int ret;
+
+ DBG(cdev, "acc_function_bind dev: %p\n", dev);
+
+ ret = hid_register_driver(&acc_hid_driver);
+ if (ret)
+ return ret;
+
+ dev->start_requested = 0;
+
+ /* allocate interface ID(s) */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ acc_interface_desc.bInterfaceNumber = id;
+
+ /* allocate endpoints */
+ ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc,
+ &acc_fullspeed_out_desc);
+ if (ret)
+ return ret;
+
+ /* support high speed hardware */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ acc_highspeed_in_desc.bEndpointAddress =
+ acc_fullspeed_in_desc.bEndpointAddress;
+ acc_highspeed_out_desc.bEndpointAddress =
+ acc_fullspeed_out_desc.bEndpointAddress;
+ }
+
+ DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ f->name, dev->ep_in->name, dev->ep_out->name);
+ return 0;
+}
+
+static void
+kill_all_hid_devices(struct acc_dev *dev)
+{
+ struct acc_hid_dev *hid;
+ struct list_head *entry, *temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_for_each_safe(entry, temp, &dev->hid_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ list_del(&hid->list);
+ list_add(&hid->list, &dev->dead_hid_list);
+ }
+ list_for_each_safe(entry, temp, &dev->new_hid_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ list_del(&hid->list);
+ list_add(&hid->list, &dev->dead_hid_list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ schedule_work(&dev->hid_work);
+}
+
+static void
+acc_hid_unbind(struct acc_dev *dev)
+{
+ hid_unregister_driver(&acc_hid_driver);
+ kill_all_hid_devices(dev);
+}
+
+static void
+acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct acc_dev *dev = func_to_dev(f);
+ struct usb_request *req;
+ int i;
+
+ while ((req = req_get(dev, &dev->tx_idle)))
+ acc_request_free(req, dev->ep_in);
+ for (i = 0; i < RX_REQ_MAX; i++)
+ acc_request_free(dev->rx_req[i], dev->ep_out);
+
+ acc_hid_unbind(dev);
+}
+
+static void acc_start_work(struct work_struct *data)
+{
+ char *envp[2] = { "ACCESSORY=START", NULL };
+ kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static int acc_hid_init(struct acc_hid_dev *hdev)
+{
+ struct hid_device *hid;
+ int ret;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+
+ hid->ll_driver = &acc_hid_ll_driver;
+ hid->dev.parent = acc_device.this_device;
+
+ hid->bus = BUS_USB;
+ hid->vendor = HID_ANY_ID;
+ hid->product = HID_ANY_ID;
+ hid->driver_data = hdev;
+ ret = hid_add_device(hid);
+ if (ret) {
+ pr_err("can't add hid device: %d\n", ret);
+ hid_destroy_device(hid);
+ return ret;
+ }
+
+ hdev->hid = hid;
+ return 0;
+}
+
+static void acc_hid_delete(struct acc_hid_dev *hid)
+{
+ kfree(hid->report_desc);
+ kfree(hid);
+}
+
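+/*
+ * Register new HID devices and destroy dead ones in process context.
+ * The control request handler that queues them runs in atomic context
+ * (hence the GFP_ATOMIC allocations in acc_hid_new()), and
+ * hid_add_device()/hid_destroy_device() may sleep, so both are
+ * deferred to this worker.
+ */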
+static void acc_hid_work(struct work_struct *data)
+{
+ struct acc_dev *dev = _acc_dev;
+ struct list_head *entry, *temp;
+ struct acc_hid_dev *hid;
+ struct list_head new_list, dead_list;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&new_list);
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* copy hids that are ready for initialization to new_list */
+ list_for_each_safe(entry, temp, &dev->new_hid_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ if (hid->report_desc_offset == hid->report_desc_len)
+ list_move(&hid->list, &new_list);
+ }
+
+ if (list_empty(&dev->dead_hid_list)) {
+ INIT_LIST_HEAD(&dead_list);
+ } else {
+ /* move all of dev->dead_hid_list to dead_list */
+ dead_list.prev = dev->dead_hid_list.prev;
+ dead_list.next = dev->dead_hid_list.next;
+ dead_list.next->prev = &dead_list;
+ dead_list.prev->next = &dead_list;
+ INIT_LIST_HEAD(&dev->dead_hid_list);
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* register new HID devices */
+ list_for_each_safe(entry, temp, &new_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ if (acc_hid_init(hid)) {
+ pr_err("can't add HID device %p\n", hid);
+ acc_hid_delete(hid);
+ } else {
+ spin_lock_irqsave(&dev->lock, flags);
+ list_move(&hid->list, &dev->hid_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ }
+ }
+
+ /* remove dead HID devices */
+ list_for_each_safe(entry, temp, &dead_list) {
+ hid = list_entry(entry, struct acc_hid_dev, list);
+ list_del(&hid->list);
+ if (hid->hid)
+ hid_destroy_device(hid->hid);
+ acc_hid_delete(hid);
+ }
+}
+
+static int acc_function_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct acc_dev *dev = func_to_dev(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_out);
+ if (ret) {
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+
+ dev->online = 1;
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+ return 0;
+}
+
+static void acc_function_disable(struct usb_function *f)
+{
+ struct acc_dev *dev = func_to_dev(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ DBG(cdev, "acc_function_disable\n");
+ acc_set_disconnected(dev);
+ usb_ep_disable(dev->ep_in);
+ usb_ep_disable(dev->ep_out);
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+
+ VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int acc_bind_config(struct usb_configuration *c)
+{
+ struct acc_dev *dev = _acc_dev;
+ int ret;
+
+ printk(KERN_INFO "acc_bind_config\n");
+
+ /* allocate a string ID for our interface */
+ if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+ ret = usb_string_id(c->cdev);
+ if (ret < 0)
+ return ret;
+ acc_string_defs[INTERFACE_STRING_INDEX].id = ret;
+ acc_interface_desc.iInterface = ret;
+ }
+
+ dev->cdev = c->cdev;
+ dev->function.name = "accessory";
+ dev->function.strings = acc_strings;
+ dev->function.fs_descriptors = fs_acc_descs;
+ dev->function.hs_descriptors = hs_acc_descs;
+ dev->function.bind = acc_function_bind;
+ dev->function.unbind = acc_function_unbind;
+ dev->function.set_alt = acc_function_set_alt;
+ dev->function.disable = acc_function_disable;
+
+ return usb_add_function(c, &dev->function);
+}
+
+static int acc_setup(void)
+{
+ struct acc_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+ init_waitqueue_head(&dev->read_wq);
+ init_waitqueue_head(&dev->write_wq);
+ atomic_set(&dev->open_excl, 0);
+ INIT_LIST_HEAD(&dev->tx_idle);
+ INIT_LIST_HEAD(&dev->hid_list);
+ INIT_LIST_HEAD(&dev->new_hid_list);
+ INIT_LIST_HEAD(&dev->dead_hid_list);
+ INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
+ INIT_WORK(&dev->hid_work, acc_hid_work);
+
+ /* _acc_dev must be set before calling usb_gadget_register_driver */
+ _acc_dev = dev;
+
+ ret = misc_register(&acc_device);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(dev);
+ pr_err("USB accessory gadget driver failed to initialize\n");
+ return ret;
+}
+
+static void acc_disconnect(void)
+{
+ /* unregister all HID devices if USB is disconnected */
+ kill_all_hid_devices(_acc_dev);
+}
+
+static void acc_cleanup(void)
+{
+ misc_deregister(&acc_device);
+ kfree(_acc_dev);
+ _acc_dev = NULL;
+}
diff --git a/drivers/usb/gadget/f_audio_source.c b/drivers/usb/gadget/f_audio_source.c
new file mode 100644
index 00000000000..56dcf217cfe
--- /dev/null
+++ b/drivers/usb/gadget/f_audio_source.c
@@ -0,0 +1,828 @@
+/*
+ * Gadget Function Driver for USB audio source device
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/usb/audio.h>
+#include <linux/wait.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+
+#define SAMPLE_RATE 44100
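+/* integer division: 44 frames per millisecond at 44100 Hz */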
+#define FRAMES_PER_MSEC (SAMPLE_RATE / 1000)
+
+#define IN_EP_MAX_PACKET_SIZE 384
+
+/* Number of requests to allocate */
+#define IN_EP_REQ_COUNT 4
+
+#define AUDIO_AC_INTERFACE 0
+#define AUDIO_AS_INTERFACE 1
+#define AUDIO_NUM_INTERFACES 2
+
+/* B.3.1 Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+
+#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
+ + UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \
+ + UAC_DT_FEATURE_UNIT_SIZE(0))
+/* B.3.2 Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
+ .bLength = UAC_DT_AC_HEADER_LENGTH,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_HEADER,
+ .bcdADC = __constant_cpu_to_le16(0x0100),
+ .wTotalLength = __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+ .bInCollection = AUDIO_NUM_INTERFACES,
+ .baInterfaceNr = {
+ [0] = AUDIO_AC_INTERFACE,
+ [1] = AUDIO_AS_INTERFACE,
+ }
+};
+
+#define INPUT_TERMINAL_ID 1
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+ .bLength = UAC_DT_INPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_INPUT_TERMINAL,
+ .bTerminalID = INPUT_TERMINAL_ID,
+ .wTerminalType = UAC_INPUT_TERMINAL_MICROPHONE,
+ .bAssocTerminal = 0,
+ .wChannelConfig = 0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID 2
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+ .bLength = UAC_DT_FEATURE_UNIT_SIZE(0),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_FEATURE_UNIT,
+ .bUnitID = FEATURE_UNIT_ID,
+ .bSourceID = INPUT_TERMINAL_ID,
+ .bControlSize = 2,
+};
+
+#define OUTPUT_TERMINAL_ID 3
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
+ .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+ .bTerminalID = OUTPUT_TERMINAL_ID,
+ .wTerminalType = UAC_TERMINAL_STREAMING,
+ .bAssocTerminal = FEATURE_UNIT_ID,
+ .bSourceID = FEATURE_UNIT_ID,
+};
+
+/* B.4.1 Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2 Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+ .bLength = UAC_DT_AS_HEADER_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_AS_GENERAL,
+ .bTerminalLink = INPUT_TERMINAL_ID,
+ .bDelay = 1,
+ .wFormatTag = UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+ .bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = UAC_FORMAT_TYPE,
+ .bFormatType = UAC_FORMAT_TYPE_I,
+ .bSubframeSize = 2,
+ .bBitResolution = 16,
+ .bSamFreqType = 1,
+};
+
+/* Standard ISO IN Endpoint Descriptor for highspeed */
+static struct usb_endpoint_descriptor hs_as_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_SYNC_SYNC
+ | USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+ .bInterval = 4, /* poll 1 per millisecond */
+};
+
+/* Standard ISO IN Endpoint Descriptor for fullspeed */
+static struct usb_endpoint_descriptor fs_as_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_SYNC_SYNC
+ | USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+ .bInterval = 1, /* poll 1 per millisecond */
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+ .bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
+ .bDescriptorType = USB_DT_CS_ENDPOINT,
+ .bDescriptorSubtype = UAC_EP_GENERAL,
+ .bmAttributes = 1,
+ .bLockDelayUnits = 1,
+ .wLockDelay = __constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *hs_audio_desc[] = {
+ (struct usb_descriptor_header *)&ac_interface_desc,
+ (struct usb_descriptor_header *)&ac_header_desc,
+
+ (struct usb_descriptor_header *)&input_terminal_desc,
+ (struct usb_descriptor_header *)&output_terminal_desc,
+ (struct usb_descriptor_header *)&feature_unit_desc,
+
+ (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_header_desc,
+
+ (struct usb_descriptor_header *)&as_type_i_desc,
+
+ (struct usb_descriptor_header *)&hs_as_in_ep_desc,
+ (struct usb_descriptor_header *)&as_iso_in_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *fs_audio_desc[] = {
+ (struct usb_descriptor_header *)&ac_interface_desc,
+ (struct usb_descriptor_header *)&ac_header_desc,
+
+ (struct usb_descriptor_header *)&input_terminal_desc,
+ (struct usb_descriptor_header *)&output_terminal_desc,
+ (struct usb_descriptor_header *)&feature_unit_desc,
+
+ (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_header_desc,
+
+ (struct usb_descriptor_header *)&as_type_i_desc,
+
+ (struct usb_descriptor_header *)&fs_as_in_ep_desc,
+ (struct usb_descriptor_header *)&as_iso_in_desc,
+ NULL,
+};
+
+static struct snd_pcm_hardware audio_hw_info = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER,
+
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = SAMPLE_RATE,
+ .rate_max = SAMPLE_RATE,
+
+ .buffer_bytes_max = 1024 * 1024,
+ .period_bytes_min = 64,
+ .period_bytes_max = 512 * 1024,
+ .periods_min = 2,
+ .periods_max = 1024,
+};
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_config {
+ int card;
+ int device;
+};
+
+struct audio_dev {
+ struct usb_function func;
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+ struct snd_pcm_substream *substream;
+
+ struct list_head idle_reqs;
+ struct usb_ep *in_ep;
+
+ spinlock_t lock;
+
+ /* beginning, end and current position in our buffer */
+ void *buffer_start;
+ void *buffer_end;
+ void *buffer_pos;
+
+ /* byte size of a "period" */
+ unsigned int period;
+ /* bytes sent since last call to snd_pcm_period_elapsed */
+ unsigned int period_offset;
+ /* time we started playing */
+ ktime_t start_time;
+ /* number of frames sent since start_time */
+ s64 frames_sent;
+};
+
+static inline struct audio_dev *func_to_audio(struct usb_function *f)
+{
+ return container_of(f, struct audio_dev, func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+ req->length = buffer_size;
+ return req;
+}
+
+static void audio_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+static void audio_req_put(struct audio_dev *audio, struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&audio->lock, flags);
+ list_add_tail(&req->list, &audio->idle_reqs);
+ spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static struct usb_request *audio_req_get(struct audio_dev *audio)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&audio->lock, flags);
+ if (list_empty(&audio->idle_reqs)) {
+ req = 0;
+ } else {
+ req = list_first_entry(&audio->idle_reqs, struct usb_request,
+ list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&audio->lock, flags);
+ return req;
+}
+
+/* send the appropriate number of packets to match our bitrate */
+static void audio_send(struct audio_dev *audio)
+{
+ struct snd_pcm_runtime *runtime;
+ struct usb_request *req;
+ int length, length1, length2, ret;
+ s64 msecs;
+ s64 frames;
+ ktime_t now;
+
+ /* audio->substream will be null if we have been closed */
+ if (!audio->substream)
+ return;
+ /* audio->buffer_pos will be null if we have been stopped */
+ if (!audio->buffer_pos)
+ return;
+
+ runtime = audio->substream->runtime;
+
+ /* compute number of frames to send */
+ now = ktime_get();
+ msecs = ktime_to_ns(now) - ktime_to_ns(audio->start_time);
+ do_div(msecs, 1000000);
+ frames = msecs * SAMPLE_RATE;
+ do_div(frames, 1000);
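+ /* e.g. 250 ms after start: frames = 250 * 44100 / 1000 = 11025 */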
+
+ /* Readjust our frames_sent if we fall too far behind.
+ * If we get too far behind it is better to drop some frames than
+ * to keep sending data too fast in an attempt to catch up.
+ */
+ if (frames - audio->frames_sent > 10 * FRAMES_PER_MSEC)
+ audio->frames_sent = frames - FRAMES_PER_MSEC;
+
+ frames -= audio->frames_sent;
+
+ /* We need to send something to keep the pipeline going */
+ if (frames <= 0)
+ frames = FRAMES_PER_MSEC;
+
+ while (frames > 0) {
+ req = audio_req_get(audio);
+ if (!req)
+ break;
+
+ length = frames_to_bytes(runtime, frames);
+ if (length > IN_EP_MAX_PACKET_SIZE)
+ length = IN_EP_MAX_PACKET_SIZE;
+
+ if (audio->buffer_pos + length > audio->buffer_end)
+ length1 = audio->buffer_end - audio->buffer_pos;
+ else
+ length1 = length;
+ memcpy(req->buf, audio->buffer_pos, length1);
+ if (length1 < length) {
+ /* Wrap around and copy remaining length
+ * at beginning of buffer.
+ */
+ length2 = length - length1;
+ memcpy(req->buf + length1, audio->buffer_start,
+ length2);
+ audio->buffer_pos = audio->buffer_start + length2;
+ } else {
+ audio->buffer_pos += length1;
+ if (audio->buffer_pos >= audio->buffer_end)
+ audio->buffer_pos = audio->buffer_start;
+ }
+
+ req->length = length;
+ ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
+ if (ret < 0) {
+ pr_err("usb_ep_queue failed ret: %d\n", ret);
+ audio_req_put(audio, req);
+ break;
+ }
+
+ frames -= bytes_to_frames(runtime, length);
+ audio->frames_sent += bytes_to_frames(runtime, length);
+ }
+}
+
+static void audio_control_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* nothing to do here */
+}
+
+static void audio_data_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct audio_dev *audio = req->context;
+
+ pr_debug("audio_data_complete req->status %d req->actual %d\n",
+ req->status, req->actual);
+
+ audio_req_put(audio, req);
+
+ if (!audio->buffer_start || req->status)
+ return;
+
+ audio->period_offset += req->actual;
+ if (audio->period_offset >= audio->period) {
+ snd_pcm_period_elapsed(audio->substream);
+ audio->period_offset = 0;
+ }
+ audio_send(audio);
+}
+
+static int audio_set_endpoint_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ int value = -EOPNOTSUPP;
+ u16 ep = le16_to_cpu(ctrl->wIndex);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+
+ pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+ ctrl->bRequest, w_value, len, ep);
+
+ switch (ctrl->bRequest) {
+ case UAC_SET_CUR:
+ case UAC_SET_MIN:
+ case UAC_SET_MAX:
+ case UAC_SET_RES:
+ value = len;
+ break;
+ default:
+ break;
+ }
+
+ return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int value = -EOPNOTSUPP;
+ u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u8 *buf = cdev->req->buf;
+
+ pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+ ctrl->bRequest, w_value, len, ep);
+
+ if (w_value == (UAC_EP_CS_ATTR_SAMPLE_RATE << 8)) {
+ switch (ctrl->bRequest) {
+ case UAC_GET_CUR:
+ case UAC_GET_MIN:
+ case UAC_GET_MAX:
+ case UAC_GET_RES:
+ /* return our sample rate */
+ buf[0] = (u8)SAMPLE_RATE;
+ buf[1] = (u8)(SAMPLE_RATE >> 8);
+ buf[2] = (u8)(SAMPLE_RATE >> 16);
+ value = 3;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return value;
+}
+
+static int
+audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* composite driver infrastructure handles everything; interface
+ * activation uses set_alt().
+ */
+ switch (ctrl->bRequestType) {
+ case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+ value = audio_set_endpoint_req(f, ctrl);
+ break;
+
+ case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+ value = audio_get_endpoint_req(f, ctrl);
+ break;
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ pr_debug("audio req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ req->complete = audio_control_complete;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ pr_err("audio response on err %d\n", value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct audio_dev *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+
+ ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
+ if (ret)
+ return ret;
+
+ usb_ep_enable(audio->in_ep);
+ return 0;
+}
+
+static void audio_disable(struct usb_function *f)
+{
+ struct audio_dev *audio = func_to_audio(f);
+
+ pr_debug("audio_disable\n");
+ usb_ep_disable(audio->in_ep);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void audio_build_desc(struct audio_dev *audio)
+{
+ u8 *sam_freq;
+ int rate;
+
+ /* Set channel numbers */
+ input_terminal_desc.bNrChannels = 2;
+ as_type_i_desc.bNrChannels = 2;
+
+ /* Set sample rates */
+ rate = SAMPLE_RATE;
+ sam_freq = as_type_i_desc.tSamFreq[0];
+ memcpy(sam_freq, &rate, 3);
+}
+
+/* audio function driver setup/binding */
+static int
+audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct audio_dev *audio = func_to_audio(f);
+ int status;
+ struct usb_ep *ep;
+ struct usb_request *req;
+ int i;
+
+ audio_build_desc(audio);
+
+ /* allocate instance-specific interface IDs, and patch descriptors */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ ac_interface_desc.bInterfaceNumber = status;
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ as_interface_alt_0_desc.bInterfaceNumber = status;
+ as_interface_alt_1_desc.bInterfaceNumber = status;
+
+ status = -ENODEV;
+
+ /* allocate our endpoint */
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_as_in_ep_desc);
+ if (!ep)
+ goto fail;
+ audio->in_ep = ep;
+ ep->driver_data = audio; /* claim */
+
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ hs_as_in_ep_desc.bEndpointAddress =
+ fs_as_in_ep_desc.bEndpointAddress;
+
+ f->fs_descriptors = fs_audio_desc;
+ f->hs_descriptors = hs_audio_desc;
+
+ for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
+ req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
+ if (req) {
+ req->context = audio;
+ req->complete = audio_data_complete;
+ audio_req_put(audio, req);
+ } else
+ status = -ENOMEM;
+ }
+
+fail:
+ return status;
+}
+
+static void
+audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct audio_dev *audio = func_to_audio(f);
+ struct usb_request *req;
+
+ while ((req = audio_req_get(audio)))
+ audio_request_free(req, audio->in_ep);
+
+ snd_card_free_when_closed(audio->card);
+ audio->card = NULL;
+ audio->pcm = NULL;
+ audio->substream = NULL;
+ audio->in_ep = NULL;
+}
+
+static void audio_pcm_playback_start(struct audio_dev *audio)
+{
+ audio->start_time = ktime_get();
+ audio->frames_sent = 0;
+ audio_send(audio);
+}
+
+static void audio_pcm_playback_stop(struct audio_dev *audio)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&audio->lock, flags);
+ audio->buffer_start = 0;
+ audio->buffer_end = 0;
+ audio->buffer_pos = 0;
+ spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static int audio_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct audio_dev *audio = substream->private_data;
+
+ runtime->private_data = audio;
+ runtime->hw = audio_hw_info;
+ snd_pcm_limit_hw_rates(runtime);
+ runtime->hw.channels_max = 2;
+
+ audio->substream = substream;
+ return 0;
+}
+
+static int audio_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct audio_dev *audio = substream->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&audio->lock, flags);
+ audio->substream = NULL;
+ spin_unlock_irqrestore(&audio->lock, flags);
+
+ return 0;
+}
+
+static int audio_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ unsigned int channels = params_channels(params);
+ unsigned int rate = params_rate(params);
+
+ if (rate != SAMPLE_RATE)
+ return -EINVAL;
+ if (channels != 2)
+ return -EINVAL;
+
+ return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(params));
+}
+
+static int audio_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int audio_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct audio_dev *audio = runtime->private_data;
+
+ audio->period = snd_pcm_lib_period_bytes(substream);
+ audio->period_offset = 0;
+ audio->buffer_start = runtime->dma_area;
+ audio->buffer_end = audio->buffer_start
+ + snd_pcm_lib_buffer_bytes(substream);
+ audio->buffer_pos = audio->buffer_start;
+
+ return 0;
+}
+
+static snd_pcm_uframes_t audio_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct audio_dev *audio = runtime->private_data;
+ ssize_t bytes = audio->buffer_pos - audio->buffer_start;
+
+ /* return offset of next frame to fill in our buffer */
+ return bytes_to_frames(runtime, bytes);
+}
+
+static int audio_pcm_playback_trigger(struct snd_pcm_substream *substream,
+ int cmd)
+{
+ struct audio_dev *audio = substream->runtime->private_data;
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ audio_pcm_playback_start(audio);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ audio_pcm_playback_stop(audio);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
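+/* single statically allocated instance: only one audio source
+ * function can be bound at a time
+ */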
+static struct audio_dev _audio_dev = {
+ .func = {
+ .name = "audio_source",
+ .bind = audio_bind,
+ .unbind = audio_unbind,
+ .set_alt = audio_set_alt,
+ .setup = audio_setup,
+ .disable = audio_disable,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(_audio_dev.lock),
+ .idle_reqs = LIST_HEAD_INIT(_audio_dev.idle_reqs),
+};
+
+static struct snd_pcm_ops audio_playback_ops = {
+ .open = audio_pcm_open,
+ .close = audio_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = audio_pcm_hw_params,
+ .hw_free = audio_pcm_hw_free,
+ .prepare = audio_pcm_prepare,
+ .trigger = audio_pcm_playback_trigger,
+ .pointer = audio_pcm_pointer,
+};
+
+int audio_source_bind_config(struct usb_configuration *c,
+ struct audio_source_config *config)
+{
+ struct audio_dev *audio;
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+ int err;
+
+ config->card = -1;
+ config->device = -1;
+
+ audio = &_audio_dev;
+
+ err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+ THIS_MODULE, 0, &card);
+ if (err)
+ return err;
+
+ snd_card_set_dev(card, &c->cdev->gadget->dev);
+
+ err = snd_pcm_new(card, "USB audio source", 0, 1, 0, &pcm);
+ if (err)
+ goto pcm_fail;
+ pcm->private_data = audio;
+ pcm->info_flags = 0;
+ audio->pcm = pcm;
+
+ strlcpy(pcm->name, "USB gadget audio", sizeof(pcm->name));
+
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &audio_playback_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ NULL, 0, 64 * 1024);
+
+ strlcpy(card->driver, "audio_source", sizeof(card->driver));
+ strlcpy(card->shortname, card->driver, sizeof(card->shortname));
+ strlcpy(card->longname, "USB accessory audio source",
+ sizeof(card->longname));
+
+ err = snd_card_register(card);
+ if (err)
+ goto register_fail;
+
+ err = usb_add_function(c, &audio->func);
+ if (err)
+ goto add_fail;
+
+ config->card = pcm->card->number;
+ config->device = pcm->device;
+ audio->card = card;
+ return 0;
+
+add_fail:
+register_fail:
+pcm_fail:
+ snd_card_free(card); /* audio->card is only set on success */
+ return err;
+}
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index b6e9d917221..bf7d627b945 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1561,7 +1561,12 @@ static int ffs_func_eps_enable(struct ffs_function *func)
spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
struct usb_endpoint_descriptor *ds;
- ds = ep->descs[ep->descs[1] ? 1 : 0];
+ int desc_idx = ffs->gadget->speed == USB_SPEED_HIGH ? 1 : 0;
+ ds = ep->descs[desc_idx];
+ if (!ds) {
+ ret = -EINVAL;
+ break;
+ }
ep->ep->driver_data = ep;
ep->ep->desc = ds;
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
new file mode 100644
index 00000000000..960d64fbd40
--- /dev/null
+++ b/drivers/usb/gadget/f_mtp.c
@@ -0,0 +1,1285 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_mtp.h>
+
+#define MTP_BULK_BUFFER_SIZE 16384
+#define INTR_BUFFER_SIZE 28
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX 0
+
+/* values for mtp_dev.state */
+#define STATE_OFFLINE 0 /* initial state, disconnected */
+#define STATE_READY 1 /* ready for userspace calls */
+#define STATE_BUSY 2 /* processing userspace calls */
+#define STATE_CANCELED 3 /* transaction canceled by host */
+#define STATE_ERROR 4 /* error from completion routine */
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+#define INTR_REQ_MAX 5
+
+/* ID for Microsoft MTP OS String */
+#define MTP_OS_STRING_ID 0xEE
+
+/* MTP class requests */
+#define MTP_REQ_CANCEL 0x64
+#define MTP_REQ_GET_EXT_EVENT_DATA 0x65
+#define MTP_REQ_RESET 0x66
+#define MTP_REQ_GET_DEVICE_STATUS 0x67
+
+/* constants for device status */
+#define MTP_RESPONSE_OK 0x2001
+#define MTP_RESPONSE_DEVICE_BUSY 0x2019
+
+static const char mtp_shortname[] = "mtp_usb";
+
+struct mtp_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+ spinlock_t lock;
+
+ struct usb_ep *ep_in;
+ struct usb_ep *ep_out;
+ struct usb_ep *ep_intr;
+
+ int state;
+
+ /* synchronize access to our device file */
+ atomic_t open_excl;
+ /* to enforce only one ioctl at a time */
+ atomic_t ioctl_excl;
+
+ struct list_head tx_idle;
+ struct list_head intr_idle;
+
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ wait_queue_head_t intr_wq;
+ struct usb_request *rx_req[RX_REQ_MAX];
+ int rx_done;
+
+ /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
+ * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
+ */
+ struct workqueue_struct *wq;
+ struct work_struct send_file_work;
+ struct work_struct receive_file_work;
+ struct file *xfer_file;
+ loff_t xfer_file_offset;
+ int64_t xfer_file_length;
+ unsigned xfer_send_header;
+ uint16_t xfer_command;
+ uint32_t xfer_transaction_id;
+ int xfer_result;
+};
+
+static struct usb_interface_descriptor mtp_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = 0,
+};
+
+static struct usb_interface_descriptor ptp_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_STILL_IMAGE,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 1,
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_intr_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
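+ /* 2^(6-1) = 32 microframes (4 ms) between polls at high speed */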
+ .bInterval = 6,
+};
+
+static struct usb_descriptor_header *fs_mtp_descs[] = {
+ (struct usb_descriptor_header *) &mtp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *hs_mtp_descs[] = {
+ (struct usb_descriptor_header *) &mtp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *fs_ptp_descs[] = {
+ (struct usb_descriptor_header *) &ptp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *hs_ptp_descs[] = {
+ (struct usb_descriptor_header *) &ptp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_string mtp_string_defs[] = {
+ /* Naming interface "MTP" so libmtp will recognize us */
+ [INTERFACE_STRING_INDEX].s = "MTP",
+ { }, /* end of list */
+};
+
+static struct usb_gadget_strings mtp_string_table = {
+ .language = 0x0409, /* en-US */
+ .strings = mtp_string_defs,
+};
+
+static struct usb_gadget_strings *mtp_strings[] = {
+ &mtp_string_table,
+ NULL,
+};
+
+/* Microsoft MTP OS String */
+static u8 mtp_os_string[] = {
+ 18, /* sizeof(mtp_os_string) */
+ USB_DT_STRING,
+ /* Signature field: "MSFT100" */
+ 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+ /* vendor code */
+ 1,
+ /* padding */
+ 0
+};
+
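+/* A host that recognizes the "MSFT100" signature above issues a
+ * vendor request (using the vendor code, with wIndex 4) to fetch the
+ * extended configuration descriptor defined below.
+ */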
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mtp_ext_config_desc_header {
+ __le32 dwLength;
+ __le16 bcdVersion;
+ __le16 wIndex;
+ __u8 bCount;
+ __u8 reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mtp_ext_config_desc_function {
+ __u8 bFirstInterfaceNumber;
+ __u8 bInterfaceCount;
+ __u8 compatibleID[8];
+ __u8 subCompatibleID[8];
+ __u8 reserved[6];
+};
+
+/* MTP Extended Configuration Descriptor */
+struct {
+ struct mtp_ext_config_desc_header header;
+ struct mtp_ext_config_desc_function function;
+} mtp_ext_config_desc = {
+ .header = {
+ .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
+ .bcdVersion = __constant_cpu_to_le16(0x0100),
+ .wIndex = __constant_cpu_to_le16(4),
+ .bCount = 1,
+ },
+ .function = {
+ .bFirstInterfaceNumber = 0,
+ .bInterfaceCount = 1,
+ .compatibleID = { 'M', 'T', 'P' },
+ },
+};
+
+struct mtp_device_status {
+ __le16 wLength;
+ __le16 wCode;
+};
+
+struct mtp_data_header {
+ /* length of packet, including this header */
+ __le32 length;
+ /* container type (2 for data packet) */
+ __le16 type;
+ /* MTP command code */
+ __le16 command;
+ /* MTP transaction ID */
+ __le32 transaction_id;
+};
+
+/* temporary variable used between mtp_open() and mtp_gadget_bind() */
+static struct mtp_dev *_mtp_dev;
+
+static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
+{
+ return container_of(f, struct mtp_dev, function);
+}
+
+static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ /* now allocate buffers for the requests */
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+
+ return req;
+}
+
+static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
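+/*
+ * Simple exclusion primitive: atomically take the counter from 0 to 1,
+ * or back off and fail if someone else already holds it. Used to allow
+ * only one opener and one ioctl caller at a time.
+ */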
+static inline int mtp_lock(atomic_t *excl)
+{
+ if (atomic_inc_return(excl) == 1) {
+ return 0;
+ } else {
+ atomic_dec(excl);
+ return -1;
+ }
+}
+
+static inline void mtp_unlock(atomic_t *excl)
+{
+ atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
+ struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&req->list, head);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request
+*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(head)) {
+ req = NULL;
+ } else {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return req;
+}
+
+static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (req->status != 0)
+ dev->state = STATE_ERROR;
+
+ mtp_req_put(dev, &dev->tx_idle, req);
+
+ wake_up(&dev->write_wq);
+}
+
+static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ dev->rx_done = 1;
+ if (req->status != 0)
+ dev->state = STATE_ERROR;
+
+ wake_up(&dev->read_wq);
+}
+
+static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (req->status != 0)
+ dev->state = STATE_ERROR;
+
+ mtp_req_put(dev, &dev->intr_idle, req);
+
+ wake_up(&dev->intr_wq);
+}
+
+static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
+ struct usb_endpoint_descriptor *in_desc,
+ struct usb_endpoint_descriptor *out_desc,
+ struct usb_endpoint_descriptor *intr_desc)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ int i;
+
+ DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+ ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_in = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_out = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_intr = ep;
+
+ /* now allocate requests for our endpoints */
+ for (i = 0; i < TX_REQ_MAX; i++) {
+ req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = mtp_complete_in;
+ mtp_req_put(dev, &dev->tx_idle, req);
+ }
+ for (i = 0; i < RX_REQ_MAX; i++) {
+ req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = mtp_complete_out;
+ dev->rx_req[i] = req;
+ }
+ for (i = 0; i < INTR_REQ_MAX; i++) {
+ req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = mtp_complete_intr;
+ mtp_req_put(dev, &dev->intr_idle, req);
+ }
+
+ return 0;
+
+fail:
+ printk(KERN_ERR "mtp_bind() could not allocate requests\n");
+ return -1;
+}
+
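+/*
+ * read() blocks until the function is online, then queues a single
+ * request on the OUT endpoint and sleeps until the completion handler
+ * sets rx_done. A host-initiated cancel surfaces as -ECANCELED.
+ */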
+static ssize_t mtp_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ int r = count, xfer;
+ int ret = 0;
+
+ DBG(cdev, "mtp_read(%d)\n", count);
+
+ if (count > MTP_BULK_BUFFER_SIZE)
+ return -EINVAL;
+
+ /* we will block until we're online */
+ DBG(cdev, "mtp_read: waiting for online state\n");
+ ret = wait_event_interruptible(dev->read_wq,
+ dev->state != STATE_OFFLINE);
+ if (ret < 0) {
+ r = ret;
+ goto done;
+ }
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED) {
+ /* report cancelation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ return -ECANCELED;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
+requeue_req:
+ /* queue a request */
+ req = dev->rx_req[0];
+ req->length = count;
+ dev->rx_done = 0;
+ ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+ if (ret < 0) {
+ r = -EIO;
+ goto done;
+ } else {
+ DBG(cdev, "rx %p queue\n", req);
+ }
+
+ /* wait for a request to complete */
+ ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+ if (ret < 0) {
+ r = ret;
+ usb_ep_dequeue(dev->ep_out, req);
+ goto done;
+ }
+ if (dev->state == STATE_BUSY) {
+ /* If we got a 0-len packet, throw it back and try again. */
+ if (req->actual == 0)
+ goto requeue_req;
+
+ DBG(cdev, "rx %p %d\n", req, req->actual);
+ xfer = (req->actual < count) ? req->actual : count;
+ r = xfer;
+ if (copy_to_user(buf, req->buf, xfer))
+ r = -EFAULT;
+ } else
+ r = -EIO;
+
+done:
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ r = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+
+ DBG(cdev, "mtp_read returning %d\n", r);
+ return r;
+}
+
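+/*
+ * write() streams the user buffer through the idle tx requests. If the
+ * total size is an exact multiple of the endpoint's max packet size, a
+ * zero-length packet is sent at the end so the host can detect the end
+ * of the transfer.
+ */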
+static ssize_t mtp_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = NULL;
+ int r = count, xfer;
+ int sendZLP = 0;
+ int ret;
+
+ DBG(cdev, "mtp_write(%d)\n", count);
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED) {
+ /* report cancelation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ return -ECANCELED;
+ }
+ if (dev->state == STATE_OFFLINE) {
+ spin_unlock_irq(&dev->lock);
+ return -ENODEV;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
+ /* we need to send a zero length packet to signal the end of transfer
+ * if the transfer size is aligned to a packet boundary.
+ */
+ if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+ sendZLP = 1;
+
+ while (count > 0 || sendZLP) {
+ /* so we exit after sending ZLP */
+ if (count == 0)
+ sendZLP = 0;
+
+ if (dev->state != STATE_BUSY) {
+ DBG(cdev, "mtp_write dev->error\n");
+ r = -EIO;
+ break;
+ }
+
+ /* get an idle tx request to use */
+ req = NULL;
+ ret = wait_event_interruptible(dev->write_wq,
+ ((req = mtp_req_get(dev, &dev->tx_idle))
+ || dev->state != STATE_BUSY));
+ if (!req) {
+ r = ret;
+ break;
+ }
+
+ if (count > MTP_BULK_BUFFER_SIZE)
+ xfer = MTP_BULK_BUFFER_SIZE;
+ else
+ xfer = count;
+ if (xfer && copy_from_user(req->buf, buf, xfer)) {
+ r = -EFAULT;
+ break;
+ }
+
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+ if (ret < 0) {
+ DBG(cdev, "mtp_write: xfer error %d\n", ret);
+ r = -EIO;
+ break;
+ }
+
+ buf += xfer;
+ count -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = NULL;
+ }
+
+ if (req)
+ mtp_req_put(dev, &dev->tx_idle, req);
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ r = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+
+ DBG(cdev, "mtp_write returning %d\n", r);
+ return r;
+}
+
+/* read from a local file and write to USB */
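+/*
+ * For MTP_SEND_FILE_WITH_HEADER the first request is prefixed with an
+ * MTP data header carrying the total length, command code, and
+ * transaction ID passed in from the ioctl.
+ */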
+static void send_file_work(struct work_struct *data)
+{
+ struct mtp_dev *dev = container_of(data, struct mtp_dev,
+ send_file_work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = NULL;
+ struct mtp_data_header *header;
+ struct file *filp;
+ loff_t offset;
+ int64_t count;
+ int xfer, ret, hdr_size;
+ int r = 0;
+ int sendZLP = 0;
+
+ /* read our parameters */
+ smp_rmb();
+ filp = dev->xfer_file;
+ offset = dev->xfer_file_offset;
+ count = dev->xfer_file_length;
+
+ DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
+
+ if (dev->xfer_send_header) {
+ hdr_size = sizeof(struct mtp_data_header);
+ count += hdr_size;
+ } else {
+ hdr_size = 0;
+ }
+
+ /* we need to send a zero length packet to signal the end of transfer
+ * if the transfer size is aligned to a packet boundary.
+ */
+ if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+ sendZLP = 1;
+
+ while (count > 0 || sendZLP) {
+ /* so we exit after sending ZLP */
+ if (count == 0)
+ sendZLP = 0;
+
+ /* get an idle tx request to use */
+ req = NULL;
+ ret = wait_event_interruptible(dev->write_wq,
+ (req = mtp_req_get(dev, &dev->tx_idle))
+ || dev->state != STATE_BUSY);
+ if (dev->state == STATE_CANCELED) {
+ r = -ECANCELED;
+ break;
+ }
+ if (!req) {
+ r = ret;
+ break;
+ }
+
+ if (count > MTP_BULK_BUFFER_SIZE)
+ xfer = MTP_BULK_BUFFER_SIZE;
+ else
+ xfer = count;
+
+ if (hdr_size) {
+ /* prepend MTP data header */
+ header = (struct mtp_data_header *)req->buf;
+ header->length = __cpu_to_le32(count);
+ header->type = __cpu_to_le16(2); /* data packet */
+ header->command = __cpu_to_le16(dev->xfer_command);
+ header->transaction_id =
+ __cpu_to_le32(dev->xfer_transaction_id);
+ }
+
+ ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
+ &offset);
+ if (ret < 0) {
+ r = ret;
+ break;
+ }
+ xfer = ret + hdr_size;
+ hdr_size = 0;
+
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+ if (ret < 0) {
+ DBG(cdev, "send_file_work: xfer error %d\n", ret);
+ dev->state = STATE_ERROR;
+ r = -EIO;
+ break;
+ }
+
+ count -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = NULL;
+ }
+
+ if (req)
+ mtp_req_put(dev, &dev->tx_idle, req);
+
+ DBG(cdev, "send_file_work returning %d\n", r);
+ /* write the result */
+ dev->xfer_result = r;
+ smp_wmb();
+}
+
+/* read from USB and write to a local file */
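+/*
+ * USB reads and file writes are pipelined: while one completed request
+ * is written out to the file, the next read is already queued, cycling
+ * through the RX_REQ_MAX bulk buffers. A transfer length of 0xFFFFFFFF
+ * means "read until a short or zero-length packet".
+ */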
+static void receive_file_work(struct work_struct *data)
+{
+ struct mtp_dev *dev = container_of(data, struct mtp_dev,
+ receive_file_work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *read_req = NULL, *write_req = NULL;
+ struct file *filp;
+ loff_t offset;
+ int64_t count;
+ int ret, cur_buf = 0;
+ int r = 0;
+
+ /* read our parameters */
+ smp_rmb();
+ filp = dev->xfer_file;
+ offset = dev->xfer_file_offset;
+ count = dev->xfer_file_length;
+
+ DBG(cdev, "receive_file_work(%lld)\n", count);
+
+ while (count > 0 || write_req) {
+ if (count > 0) {
+ /* queue a request */
+ read_req = dev->rx_req[cur_buf];
+ cur_buf = (cur_buf + 1) % RX_REQ_MAX;
+
+ read_req->length = (count > MTP_BULK_BUFFER_SIZE
+ ? MTP_BULK_BUFFER_SIZE : count);
+ dev->rx_done = 0;
+ ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
+ if (ret < 0) {
+ r = -EIO;
+ dev->state = STATE_ERROR;
+ break;
+ }
+ }
+
+ if (write_req) {
+ DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+ ret = vfs_write(filp, write_req->buf, write_req->actual,
+ &offset);
+ DBG(cdev, "vfs_write %d\n", ret);
+ if (ret != write_req->actual) {
+ r = -EIO;
+ dev->state = STATE_ERROR;
+ break;
+ }
+ write_req = NULL;
+ }
+
+ if (read_req) {
+ /* wait for our last read to complete */
+ ret = wait_event_interruptible(dev->read_wq,
+ dev->rx_done || dev->state != STATE_BUSY);
+ if (dev->state == STATE_CANCELED) {
+ r = -ECANCELED;
+ if (!dev->rx_done)
+ usb_ep_dequeue(dev->ep_out, read_req);
+ break;
+ }
+ /* if xfer_file_length is 0xFFFFFFFF, then we read until
+ * we get a zero length packet
+ */
+ if (count != 0xFFFFFFFF)
+ count -= read_req->actual;
+ if (read_req->actual < read_req->length) {
+ /*
+ * short packet is used to signal EOF for
+ * sizes > 4 gig
+ */
+ DBG(cdev, "got short packet\n");
+ count = 0;
+ }
+
+ write_req = read_req;
+ read_req = NULL;
+ }
+ }
+
+ DBG(cdev, "receive_file_work returning %d\n", r);
+ /* write the result */
+ dev->xfer_result = r;
+ smp_wmb();
+}
+
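+/*
+ * MTP events (e.g. ObjectAdded) are delivered to the host on the
+ * interrupt IN endpoint; wait up to a second for an idle interrupt
+ * request before giving up with -ETIME.
+ */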
+static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
+{
+ struct usb_request *req = NULL;
+ int ret;
+ int length = event->length;
+
+ DBG(dev->cdev, "mtp_send_event(%d)\n", event->length);
+
+ if (length < 0 || length > INTR_BUFFER_SIZE)
+ return -EINVAL;
+ if (dev->state == STATE_OFFLINE)
+ return -ENODEV;
+
+ ret = wait_event_interruptible_timeout(dev->intr_wq,
+ (req = mtp_req_get(dev, &dev->intr_idle)),
+ msecs_to_jiffies(1000));
+ if (!req)
+ return -ETIME;
+
+ if (copy_from_user(req->buf, (void __user *)event->data, length)) {
+ mtp_req_put(dev, &dev->intr_idle, req);
+ return -EFAULT;
+ }
+ req->length = length;
+ ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
+ if (ret)
+ mtp_req_put(dev, &dev->intr_idle, req);
+
+ return ret;
+}
+
+static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct file *filp = NULL;
+ int ret = -EINVAL;
+
+ if (mtp_lock(&dev->ioctl_excl))
+ return -EBUSY;
+
+ switch (code) {
+ case MTP_SEND_FILE:
+ case MTP_RECEIVE_FILE:
+ case MTP_SEND_FILE_WITH_HEADER:
+ {
+ struct mtp_file_range mfr;
+ struct work_struct *work;
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED) {
+ /* report cancelation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ ret = -ECANCELED;
+ goto out;
+ }
+ if (dev->state == STATE_OFFLINE) {
+ spin_unlock_irq(&dev->lock);
+ ret = -ENODEV;
+ goto out;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
+ if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ /* hold a reference to the file while we are working with it */
+ filp = fget(mfr.fd);
+ if (!filp) {
+ ret = -EBADF;
+ goto fail;
+ }
+
+ /* write the parameters */
+ dev->xfer_file = filp;
+ dev->xfer_file_offset = mfr.offset;
+ dev->xfer_file_length = mfr.length;
+ smp_wmb();
+
+ if (code == MTP_SEND_FILE_WITH_HEADER) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 1;
+ dev->xfer_command = mfr.command;
+ dev->xfer_transaction_id = mfr.transaction_id;
+ } else if (code == MTP_SEND_FILE) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 0;
+ } else {
+ work = &dev->receive_file_work;
+ }
+
+ /* We do the file transfer on a work queue so it will run
+ * in kernel context, which is necessary for vfs_read and
+ * vfs_write to use our buffers in the kernel address space.
+ */
+ queue_work(dev->wq, work);
+ /* wait for operation to complete */
+ flush_workqueue(dev->wq);
+ fput(filp);
+
+ /* read the result */
+ smp_rmb();
+ ret = dev->xfer_result;
+ break;
+ }
+ case MTP_SEND_EVENT:
+ {
+ struct mtp_event event;
+ /* return here so we don't change dev->state below,
+ * which would interfere with bulk transfer state.
+ */
+ if (copy_from_user(&event, (void __user *)value, sizeof(event)))
+ ret = -EFAULT;
+ else
+ ret = mtp_send_event(dev, &event);
+ goto out;
+ }
+ }
+
+fail:
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ ret = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+out:
+ mtp_unlock(&dev->ioctl_excl);
+ DBG(dev->cdev, "ioctl returning %d\n", ret);
+ return ret;
+}
+
+static int mtp_open(struct inode *ip, struct file *fp)
+{
+ printk(KERN_INFO "mtp_open\n");
+ if (mtp_lock(&_mtp_dev->open_excl))
+ return -EBUSY;
+
+ /* clear any error condition */
+ if (_mtp_dev->state != STATE_OFFLINE)
+ _mtp_dev->state = STATE_READY;
+
+ fp->private_data = _mtp_dev;
+ return 0;
+}
+
+static int mtp_release(struct inode *ip, struct file *fp)
+{
+ printk(KERN_INFO "mtp_release\n");
+
+ mtp_unlock(&_mtp_dev->open_excl);
+ return 0;
+}
+
+/* file operations for /dev/mtp_usb */
+static const struct file_operations mtp_fops = {
+ .owner = THIS_MODULE,
+ .read = mtp_read,
+ .write = mtp_write,
+ .unlocked_ioctl = mtp_ioctl,
+ .open = mtp_open,
+ .release = mtp_release,
+};
+
+static struct miscdevice mtp_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = mtp_shortname,
+ .fops = &mtp_fops,
+};
+
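+/*
+ * Handle the control requests this function cares about: the Microsoft
+ * OS string and extended configuration descriptors, plus the MTP
+ * class-specific cancel and get-device-status requests.
+ */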
+static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct mtp_dev *dev = _mtp_dev;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ unsigned long flags;
+
+ VDBG(cdev, "mtp_ctrlrequest "
+ "%02x.%02x v%04x i%04x l%u\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+
+ /* Handle MTP OS string */
+ if (ctrl->bRequestType ==
+ (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+ && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+ && (w_value >> 8) == USB_DT_STRING
+ && (w_value & 0xFF) == MTP_OS_STRING_ID) {
+ value = (w_length < sizeof(mtp_os_string)
+ ? w_length : sizeof(mtp_os_string));
+ memcpy(cdev->req->buf, mtp_os_string, value);
+ } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
+ /* Handle MTP OS descriptor */
+ DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
+ ctrl->bRequest, w_index, w_value, w_length);
+
+ if (ctrl->bRequest == 1
+ && (ctrl->bRequestType & USB_DIR_IN)
+ && (w_index == 4 || w_index == 5)) {
+ value = (w_length < sizeof(mtp_ext_config_desc) ?
+ w_length : sizeof(mtp_ext_config_desc));
+ memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+ }
+ } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+ DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
+ ctrl->bRequest, w_index, w_value, w_length);
+
+ if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
+ && w_value == 0) {
+ DBG(cdev, "MTP_REQ_CANCEL\n");
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state == STATE_BUSY) {
+ dev->state = STATE_CANCELED;
+ wake_up(&dev->read_wq);
+ wake_up(&dev->write_wq);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* We need to queue a request to read the remaining
+ * bytes, but we don't actually need to look at
+ * the contents.
+ */
+ value = w_length;
+ } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
+ && w_index == 0 && w_value == 0) {
+ struct mtp_device_status *status = cdev->req->buf;
+ status->wLength =
+ __constant_cpu_to_le16(sizeof(*status));
+
+ DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
+ spin_lock_irqsave(&dev->lock, flags);
+ /* device status is "busy" until we report
+ * the cancelation to userspace
+ */
+ if (dev->state == STATE_CANCELED)
+ status->wCode =
+ __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
+ else
+ status->wCode =
+ __cpu_to_le16(MTP_RESPONSE_OK);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ value = sizeof(*status);
+ }
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ int rc;
+ cdev->req->zero = value < w_length;
+ cdev->req->length = value;
+ rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+ if (rc < 0)
+ ERROR(cdev, "%s: response queue error\n", __func__);
+ }
+ return value;
+}
+
+static int
+mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct mtp_dev *dev = func_to_mtp(f);
+ int id;
+ int ret;
+
+ dev->cdev = cdev;
+ DBG(cdev, "mtp_function_bind dev: %p\n", dev);
+
+ /* allocate interface ID(s) */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ mtp_interface_desc.bInterfaceNumber = id;
+
+ /* allocate endpoints */
+ ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
+ &mtp_fullspeed_out_desc, &mtp_intr_desc);
+ if (ret)
+ return ret;
+
+ /* support high speed hardware */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ mtp_highspeed_in_desc.bEndpointAddress =
+ mtp_fullspeed_in_desc.bEndpointAddress;
+ mtp_highspeed_out_desc.bEndpointAddress =
+ mtp_fullspeed_out_desc.bEndpointAddress;
+ }
+
+ DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ f->name, dev->ep_in->name, dev->ep_out->name);
+ return 0;
+}
+
+static void
+mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct mtp_dev *dev = func_to_mtp(f);
+ struct usb_request *req;
+ int i;
+
+ while ((req = mtp_req_get(dev, &dev->tx_idle)))
+ mtp_request_free(req, dev->ep_in);
+ for (i = 0; i < RX_REQ_MAX; i++)
+ mtp_request_free(dev->rx_req[i], dev->ep_out);
+ while ((req = mtp_req_get(dev, &dev->intr_idle)))
+ mtp_request_free(req, dev->ep_intr);
+ dev->state = STATE_OFFLINE;
+}
+
+static int mtp_function_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct mtp_dev *dev = func_to_mtp(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_out);
+ if (ret) {
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_intr);
+ if (ret) {
+ usb_ep_disable(dev->ep_out);
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+ dev->state = STATE_READY;
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+ return 0;
+}
+
+static void mtp_function_disable(struct usb_function *f)
+{
+ struct mtp_dev *dev = func_to_mtp(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ DBG(cdev, "mtp_function_disable\n");
+ dev->state = STATE_OFFLINE;
+ usb_ep_disable(dev->ep_in);
+ usb_ep_disable(dev->ep_out);
+ usb_ep_disable(dev->ep_intr);
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+
+ VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
+{
+ struct mtp_dev *dev = _mtp_dev;
+ int ret = 0;
+
+ printk(KERN_INFO "mtp_bind_config\n");
+
+ /* allocate a string ID for our interface */
+ if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+ ret = usb_string_id(c->cdev);
+ if (ret < 0)
+ return ret;
+ mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
+ mtp_interface_desc.iInterface = ret;
+ }
+
+ dev->cdev = c->cdev;
+ dev->function.name = "mtp";
+ dev->function.strings = mtp_strings;
+ if (ptp_config) {
+ dev->function.fs_descriptors = fs_ptp_descs;
+ dev->function.hs_descriptors = hs_ptp_descs;
+ } else {
+ dev->function.fs_descriptors = fs_mtp_descs;
+ dev->function.hs_descriptors = hs_mtp_descs;
+ }
+ dev->function.bind = mtp_function_bind;
+ dev->function.unbind = mtp_function_unbind;
+ dev->function.set_alt = mtp_function_set_alt;
+ dev->function.disable = mtp_function_disable;
+
+ return usb_add_function(c, &dev->function);
+}
+
+static int mtp_setup(void)
+{
+ struct mtp_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+ init_waitqueue_head(&dev->read_wq);
+ init_waitqueue_head(&dev->write_wq);
+ init_waitqueue_head(&dev->intr_wq);
+ atomic_set(&dev->open_excl, 0);
+ atomic_set(&dev->ioctl_excl, 0);
+ INIT_LIST_HEAD(&dev->tx_idle);
+ INIT_LIST_HEAD(&dev->intr_idle);
+
+ dev->wq = create_singlethread_workqueue("f_mtp");
+ if (!dev->wq) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+ INIT_WORK(&dev->send_file_work, send_file_work);
+ INIT_WORK(&dev->receive_file_work, receive_file_work);
+
+ _mtp_dev = dev;
+
+ ret = misc_register(&mtp_device);
+ if (ret)
+ goto err2;
+
+ return 0;
+
+err2:
+ destroy_workqueue(dev->wq);
+err1:
+ _mtp_dev = NULL;
+ kfree(dev);
+ printk(KERN_ERR "mtp gadget driver failed to initialize\n");
+ return ret;
+}
+
+static void mtp_cleanup(void)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (!dev)
+ return;
+
+ misc_deregister(&mtp_device);
+ destroy_workqueue(dev->wq);
+ _mtp_dev = NULL;
+ kfree(dev);
+}
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 36e8c44d8e5..21c5ee2482d 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -821,12 +821,12 @@ rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
if (!can_support_rndis(c) || !ethaddr)
return -EINVAL;
- if (rndis_string_defs[0].id == 0) {
- /* ... and setup RNDIS itself */
- status = rndis_init();
- if (status < 0)
- return status;
+ /* setup RNDIS itself */
+ status = rndis_init();
+ if (status < 0)
+ return status;
+ if (rndis_string_defs[0].id == 0) {
status = usb_string_ids_tab(c->cdev, rndis_string_defs);
if (status)
return status;
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 1e4cfb05f70..693f0c24d51 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -1127,11 +1127,15 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
+static bool rndis_initialized;
int rndis_init(void)
{
u8 i;
+ if (rndis_initialized)
+ return 0;
+
for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
char name [20];
@@ -1158,6 +1162,7 @@ int rndis_init(void)
INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue));
}
+ rndis_initialized = true;
return 0;
}
@@ -1166,7 +1171,13 @@ void rndis_exit(void)
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
u8 i;
char name[20];
+#endif
+ if (!rndis_initialized)
+ return;
+ rndis_initialized = false;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
sprintf(name, NAME_TEMPLATE, i);
remove_proc_entry(name, NULL);
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index b369292d4b9..72068082e83 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -1126,6 +1126,7 @@ int gserial_alloc_line(unsigned char *line_num)
tty_dev = tty_port_register_device(&ports[port_num].port->port,
gs_tty_driver, port_num, NULL);
+
if (IS_ERR(tty_dev)) {
struct gs_port *port;
pr_err("%s: failed to register tty for port %d, err %ld\n",
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 5514822114a..afe9b9e50cc 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -335,7 +335,15 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
driver->unbind(udc->gadget);
goto err1;
}
- usb_gadget_connect(udc->gadget);
+ /*
+ * HACK: The Android gadget driver disconnects the gadget
+ * on bind and expects the gadget to stay disconnected until
+ * it calls usb_gadget_connect when userspace is ready. Remove
+ * the call to usb_gadget_connect below to avoid enabling the
+ * pullup before userspace is ready.
+ *
+ * usb_gadget_connect(udc->gadget);
+ */
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
return 0;
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 2311b1e4e43..3312ad2bb67 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -16,6 +16,14 @@ menuconfig USB_PHY
If you're not sure if this applies to you, it probably doesn't;
say N here.
+config USB_OTG_WAKELOCK
+ bool "Hold a wakelock when USB connected"
+ depends on WAKELOCK
+ select USB_OTG_UTILS
+ help
+ Select this to automatically hold a wakelock when USB is
+ connected, preventing suspend.
+
if USB_PHY
#
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index a9169cb1e6f..a0a6cbad880 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -5,6 +5,7 @@
ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
obj-$(CONFIG_USB_PHY) += phy.o
+obj-$(CONFIG_USB_OTG_WAKELOCK) += otg-wakelock.o
# transceiver drivers, keep the list sorted
diff --git a/drivers/usb/phy/otg-wakelock.c b/drivers/usb/phy/otg-wakelock.c
new file mode 100644
index 00000000000..479376bfa48
--- /dev/null
+++ b/drivers/usb/phy/otg-wakelock.c
@@ -0,0 +1,178 @@
+/*
+ * otg-wakelock.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/wakelock.h>
+#include <linux/spinlock.h>
+#include <linux/usb/otg.h>
+
+#define TEMPORARY_HOLD_TIME 2000
+
+static bool enabled = true;
+static struct usb_phy *otgwl_xceiv;
+static struct notifier_block otgwl_nb;
+
+/*
+ * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the
+ * held field is updated to match.
+ */
+
+static DEFINE_SPINLOCK(otgwl_spinlock);
+
+/*
+ * Only one lock for now; its name, wakelock, and held flag belong together.
+ */
+
+struct otgwl_lock {
+ char name[40];
+ struct wake_lock wakelock;
+ bool held;
+};
+
+/*
+ * VBUS present lock. Also used as a timed lock on charger
+ * connect/disconnect and USB host disconnect, to allow the system
+ * to react to the change in power.
+ */
+
+static struct otgwl_lock vbus_lock;
+
+static void otgwl_hold(struct otgwl_lock *lock)
+{
+ if (!lock->held) {
+ wake_lock(&lock->wakelock);
+ lock->held = true;
+ }
+}
+
+static void otgwl_temporary_hold(struct otgwl_lock *lock)
+{
+ wake_lock_timeout(&lock->wakelock,
+ msecs_to_jiffies(TEMPORARY_HOLD_TIME));
+ lock->held = false;
+}
+
+static void otgwl_drop(struct otgwl_lock *lock)
+{
+ if (lock->held) {
+ wake_unlock(&lock->wakelock);
+ lock->held = false;
+ }
+}
+
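+/*
+ * Hold the VBUS wakelock while a host is attached (VBUS present or
+ * enumerated), and hold it only briefly on disconnect, ID, and charger
+ * events so the system can react to the power change before suspending.
+ */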
+static void otgwl_handle_event(unsigned long event)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&otgwl_spinlock, irqflags);
+
+ if (!enabled) {
+ otgwl_drop(&vbus_lock);
+ spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+ return;
+ }
+
+ switch (event) {
+ case USB_EVENT_VBUS:
+ case USB_EVENT_ENUMERATED:
+ otgwl_hold(&vbus_lock);
+ break;
+
+ case USB_EVENT_NONE:
+ case USB_EVENT_ID:
+ case USB_EVENT_CHARGER:
+ otgwl_temporary_hold(&vbus_lock);
+ break;
+
+ default:
+ break;
+ }
+
+ spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+}
+
+static int otgwl_otg_notifications(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ otgwl_handle_event(event);
+ return NOTIFY_OK;
+}
+
+static int set_enabled(const char *val, const struct kernel_param *kp)
+{
+ int rv = param_set_bool(val, kp);
+
+ if (rv)
+ return rv;
+
+ if (otgwl_xceiv)
+ otgwl_handle_event(otgwl_xceiv->last_event);
+
+ return 0;
+}
+
+static const struct kernel_param_ops enabled_param_ops = {
+ .set = set_enabled,
+ .get = param_get_bool,
+};
+
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0644);
+MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present");
+
+static int __init otg_wakelock_init(void)
+{
+ int ret;
+ struct usb_phy *phy;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB2);
+
+ if (IS_ERR(phy)) {
+ pr_err("%s: No USB transceiver found\n", __func__);
+ return PTR_ERR(phy);
+ }
+ otgwl_xceiv = phy;
+
+ snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s",
+ dev_name(otgwl_xceiv->dev));
+ wake_lock_init(&vbus_lock.wakelock, WAKE_LOCK_SUSPEND,
+ vbus_lock.name);
+
+ otgwl_nb.notifier_call = otgwl_otg_notifications;
+ ret = usb_register_notifier(otgwl_xceiv, &otgwl_nb);
+
+ if (ret) {
+ pr_err("%s: usb_register_notifier on transceiver %s"
+ " failed\n", __func__,
+ dev_name(otgwl_xceiv->dev));
+ otgwl_xceiv = NULL;
+ wake_lock_destroy(&vbus_lock.wakelock);
+ return ret;
+ }
+
+ otgwl_handle_event(otgwl_xceiv->last_event);
+ return ret;
+}
+
+late_initcall(otg_wakelock_init);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 29a5121ce7f..a3279c7def7 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2496,6 +2496,7 @@ source "drivers/video/omap2/Kconfig"
source "drivers/video/exynos/Kconfig"
source "drivers/video/mmp/Kconfig"
source "drivers/video/backlight/Kconfig"
+source "drivers/video/adf/Kconfig"
if VT
source "drivers/video/console/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 33869eea498..3adbd32eb09 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -12,6 +12,7 @@ fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
modedb.o fbcvt.o
fb-objs := $(fb-y)
+obj-$(CONFIG_ADF) += adf/
obj-$(CONFIG_VT) += console/
obj-$(CONFIG_LOGO) += logo/
obj-y += backlight/
diff --git a/drivers/video/adf/Kconfig b/drivers/video/adf/Kconfig
new file mode 100644
index 00000000000..33858b73d8b
--- /dev/null
+++ b/drivers/video/adf/Kconfig
@@ -0,0 +1,14 @@
+menuconfig ADF
+ depends on SYNC
+ depends on DMA_SHARED_BUFFER
+ tristate "Atomic Display Framework"
+
+menuconfig ADF_FBDEV
+ depends on ADF
+ depends on FB
+ tristate "Helper for implementing the fbdev API in ADF drivers"
+
+menuconfig ADF_MEMBLOCK
+ depends on ADF
+ depends on HAVE_MEMBLOCK
+ tristate "Helper for using memblocks as buffers in ADF drivers"
diff --git a/drivers/video/adf/Makefile b/drivers/video/adf/Makefile
new file mode 100644
index 00000000000..78d0915122f
--- /dev/null
+++ b/drivers/video/adf/Makefile
@@ -0,0 +1,15 @@
+ccflags-y := -Idrivers/staging/android
+
+CFLAGS_adf.o := -I$(src)
+
+obj-$(CONFIG_ADF) += adf.o \
+ adf_client.o \
+ adf_fops.o \
+ adf_format.o \
+ adf_sysfs.o
+
+obj-$(CONFIG_COMPAT) += adf_fops32.o
+
+obj-$(CONFIG_ADF_FBDEV) += adf_fbdev.o
+
+obj-$(CONFIG_ADF_MEMBLOCK) += adf_memblock.o
diff --git a/drivers/video/adf/adf.c b/drivers/video/adf/adf.c
new file mode 100644
index 00000000000..933e74ac809
--- /dev/null
+++ b/drivers/video/adf/adf.c
@@ -0,0 +1,1181 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ * adf_modeinfo_{set_name,set_vrefresh} modified from
+ * drivers/gpu/drm/drm_modes.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <video/adf_format.h>
+
+#include "sw_sync.h"
+#include "sync.h"
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+#define CREATE_TRACE_POINTS
+#include "adf_trace.h"
+
+#define ADF_SHORT_FENCE_TIMEOUT (1 * MSEC_PER_SEC)
+#define ADF_LONG_FENCE_TIMEOUT (10 * MSEC_PER_SEC)
+
+static DEFINE_IDR(adf_devices);
+
+static void adf_fence_wait(struct adf_device *dev, struct sync_fence *fence)
+{
+ /* sync_fence_wait() dumps debug information on timeout. Experience
+  * has shown that if the pipeline gets stuck, a short timeout followed
+  * by a longer one provides useful information for debugging. */
+ int err = sync_fence_wait(fence, ADF_SHORT_FENCE_TIMEOUT);
+ if (err >= 0)
+ return;
+
+ if (err == -ETIME)
+ err = sync_fence_wait(fence, ADF_LONG_FENCE_TIMEOUT);
+
+ if (err < 0)
+ dev_warn(&dev->base.dev, "error waiting on fence: %d\n", err);
+}
+
+void adf_buffer_cleanup(struct adf_buffer *buf)
+{
+ size_t i;
+ for (i = 0; i < ARRAY_SIZE(buf->dma_bufs); i++)
+ if (buf->dma_bufs[i])
+ dma_buf_put(buf->dma_bufs[i]);
+
+ if (buf->acquire_fence)
+ sync_fence_put(buf->acquire_fence);
+}
+
+void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
+ struct adf_buffer *buf)
+{
+ /* calling adf_buffer_mapping_cleanup() is safe even if mapping is
+  * uninitialized or partially-initialized, as long as it was
+  * zeroed on allocation */
+ size_t i;
+ for (i = 0; i < ARRAY_SIZE(mapping->sg_tables); i++) {
+ if (mapping->sg_tables[i])
+ dma_buf_unmap_attachment(mapping->attachments[i],
+ mapping->sg_tables[i], DMA_TO_DEVICE);
+ if (mapping->attachments[i])
+ dma_buf_detach(buf->dma_bufs[i],
+ mapping->attachments[i]);
+ }
+}
+
+void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post)
+{
+ size_t i;
+
+ if (post->state)
+ dev->ops->state_free(dev, post->state);
+
+ for (i = 0; i < post->config.n_bufs; i++) {
+ adf_buffer_mapping_cleanup(&post->config.mappings[i],
+ &post->config.bufs[i]);
+ adf_buffer_cleanup(&post->config.bufs[i]);
+ }
+
+ kfree(post->config.custom_data);
+ kfree(post->config.mappings);
+ kfree(post->config.bufs);
+ kfree(post);
+}
+
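+/*
+ * Fallback for drivers that use ADF's sw_sync timeline for release
+ * fences. adf_device_init() rejects such drivers when CONFIG_SW_SYNC
+ * is disabled, so the BUG() below should be unreachable.
+ */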
+static void adf_sw_advance_timeline(struct adf_device *dev)
+{
+#ifdef CONFIG_SW_SYNC
+ sw_sync_timeline_inc(dev->timeline, 1);
+#else
+ BUG();
+#endif
+}
+
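+/*
+ * Worker for the device's posting queue: waits on each buffer's
+ * acquire fence, hands the configuration to the driver's post() op,
+ * advances the release timeline, and frees the config it replaced.
+ */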
+static void adf_post_work_func(struct kthread_work *work)
+{
+ struct adf_device *dev =
+ container_of(work, struct adf_device, post_work);
+ struct adf_pending_post *post, *next;
+ struct list_head saved_list;
+
+ mutex_lock(&dev->post_lock);
+ memcpy(&saved_list, &dev->post_list, sizeof(saved_list));
+ list_replace_init(&dev->post_list, &saved_list);
+ mutex_unlock(&dev->post_lock);
+
+ list_for_each_entry_safe(post, next, &saved_list, head) {
+ int i;
+
+ for (i = 0; i < post->config.n_bufs; i++) {
+ struct sync_fence *fence =
+ post->config.bufs[i].acquire_fence;
+ if (fence)
+ adf_fence_wait(dev, fence);
+ }
+
+ dev->ops->post(dev, &post->config, post->state);
+
+ if (dev->ops->advance_timeline)
+ dev->ops->advance_timeline(dev, &post->config,
+ post->state);
+ else
+ adf_sw_advance_timeline(dev);
+
+ list_del(&post->head);
+ if (dev->onscreen)
+ adf_post_cleanup(dev, dev->onscreen);
+ dev->onscreen = post;
+ }
+}
+
+void adf_attachment_free(struct adf_attachment_list *attachment)
+{
+ list_del(&attachment->head);
+ kfree(attachment);
+}
+
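+/*
+ * Find the refcount node for @type in @obj's rbtree, inserting a new
+ * zeroed node if none exists yet. Returns NULL only on allocation
+ * failure. The caller must hold @obj->event_lock.
+ */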
+struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
+ enum adf_event_type type)
+{
+ struct rb_root *root = &obj->event_refcount;
+ struct rb_node **new = &(root->rb_node);
+ struct rb_node *parent = NULL;
+ struct adf_event_refcount *refcount;
+
+ while (*new) {
+ refcount = container_of(*new, struct adf_event_refcount, node);
+ parent = *new;
+
+ if (refcount->type > type)
+ new = &(*new)->rb_left;
+ else if (refcount->type < type)
+ new = &(*new)->rb_right;
+ else
+ return refcount;
+ }
+
+ refcount = kzalloc(sizeof(*refcount), GFP_KERNEL);
+ if (!refcount)
+ return NULL;
+ refcount->type = type;
+
+ rb_link_node(&refcount->node, parent, new);
+ rb_insert_color(&refcount->node, root);
+ return refcount;
+}
+
+/**
+ * adf_event_get - increase the refcount for an event
+ *
+ * @obj: the object that produces the event
+ * @type: the event type
+ *
+ * ADF will call the object's set_event() op if needed. ops are allowed
+ * to sleep, so adf_event_get() must NOT be called from an atomic context.
+ *
+ * Returns 0 if successful, or -%EINVAL if the object does not support the
+ * requested event type.
+ */
+int adf_event_get(struct adf_obj *obj, enum adf_event_type type)
+{
+ struct adf_event_refcount *refcount;
+ int old_refcount;
+ int ret;
+
+ ret = adf_obj_check_supports_event(obj, type);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&obj->event_lock);
+
+ refcount = adf_obj_find_event_refcount(obj, type);
+ if (!refcount) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ old_refcount = refcount->refcount++;
+
+ if (old_refcount == 0) {
+ obj->ops->set_event(obj, type, true);
+ trace_adf_event_enable(obj, type);
+ }
+
+done:
+ mutex_unlock(&obj->event_lock);
+ return ret;
+}
+EXPORT_SYMBOL(adf_event_get);
+
+/**
+ * adf_event_put - decrease the refcount for an event
+ *
+ * @obj: the object that produces the event
+ * @type: the event type
+ *
+ * ADF will call the object's set_event() op if needed. ops are allowed
+ * to sleep, so adf_event_put() must NOT be called from an atomic context.
+ *
+ * Returns 0 if successful, -%EINVAL if the object does not support the
+ * requested event type, or -%EALREADY if the refcount is already 0.
+ */
+int adf_event_put(struct adf_obj *obj, enum adf_event_type type)
+{
+ struct adf_event_refcount *refcount;
+ int old_refcount;
+ int ret;
+
+ ret = adf_obj_check_supports_event(obj, type);
+ if (ret < 0)
+ return ret;
+
+
+ mutex_lock(&obj->event_lock);
+
+ refcount = adf_obj_find_event_refcount(obj, type);
+ if (!refcount) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ old_refcount = refcount->refcount--;
+
+ if (WARN_ON(old_refcount == 0)) {
+ refcount->refcount++;
+ ret = -EALREADY;
+ } else if (old_refcount == 1) {
+ obj->ops->set_event(obj, type, false);
+ trace_adf_event_disable(obj, type);
+ }
+
+done:
+ mutex_unlock(&obj->event_lock);
+ return ret;
+}
+EXPORT_SYMBOL(adf_event_put);
+
+/**
+ * adf_vsync_wait - wait for a vsync event on a display interface
+ *
+ * @intf: the display interface
+ * @timeout: timeout in milliseconds (0 = wait indefinitely)
+ *
+ * adf_vsync_wait() may sleep, so it must NOT be called from an atomic context.
+ *
+ * This function returns -%ERESTARTSYS if it is interrupted by a signal.
+ * If @timeout == 0 then this function returns 0 on vsync. If @timeout > 0 then
+ * this function returns the number of remaining jiffies or -%ETIMEDOUT on
+ * timeout.
+ */
+int adf_vsync_wait(struct adf_interface *intf, long timeout)
+{
+ ktime_t timestamp;
+ int ret;
+ unsigned long flags;
+
+ read_lock_irqsave(&intf->vsync_lock, flags);
+ timestamp = intf->vsync_timestamp;
+ read_unlock_irqrestore(&intf->vsync_lock, flags);
+
+ adf_vsync_get(intf);
+ if (timeout) {
+ ret = wait_event_interruptible_timeout(intf->vsync_wait,
+ !ktime_equal(timestamp,
+ intf->vsync_timestamp),
+ msecs_to_jiffies(timeout));
+ if (ret == 0 && ktime_equal(timestamp, intf->vsync_timestamp))
+ ret = -ETIMEDOUT;
+ } else {
+ ret = wait_event_interruptible(intf->vsync_wait,
+ !ktime_equal(timestamp,
+ intf->vsync_timestamp));
+ }
+ adf_vsync_put(intf);
+
+ return ret;
+}
+EXPORT_SYMBOL(adf_vsync_wait);
+
+static void adf_event_queue(struct adf_obj *obj, struct adf_event *event)
+{
+ struct adf_file *file;
+ unsigned long flags;
+
+ trace_adf_event(obj, event->type);
+
+ spin_lock_irqsave(&obj->file_lock, flags);
+
+ list_for_each_entry(file, &obj->file_list, head)
+ if (test_bit(event->type, file->event_subscriptions))
+ adf_file_queue_event(file, event);
+
+ spin_unlock_irqrestore(&obj->file_lock, flags);
+}
+
+/**
+ * adf_event_notify - notify userspace of a driver-private event
+ *
+ * @obj: the ADF object that produced the event
+ * @event: the event
+ *
+ * adf_event_notify() may be called safely from an atomic context. It will
+ * copy @event if needed, so @event may point to a variable on the stack.
+ *
+ * Drivers must NOT call adf_event_notify() for vsync and hotplug events.
+ * ADF provides adf_vsync_notify() and
+ * adf_hotplug_notify_{connected,disconnected}() for these events.
+ */
+int adf_event_notify(struct adf_obj *obj, struct adf_event *event)
+{
+ if (WARN_ON(event->type == ADF_EVENT_VSYNC ||
+ event->type == ADF_EVENT_HOTPLUG))
+ return -EINVAL;
+
+ adf_event_queue(obj, event);
+ return 0;
+}
+EXPORT_SYMBOL(adf_event_notify);
+
+/**
+ * adf_vsync_notify - notify ADF of a display interface's vsync event
+ *
+ * @intf: the display interface
+ * @timestamp: the time the vsync occurred
+ *
+ * adf_vsync_notify() may be called safely from an atomic context.
+ */
+void adf_vsync_notify(struct adf_interface *intf, ktime_t timestamp)
+{
+ unsigned long flags;
+ struct adf_vsync_event event;
+
+ write_lock_irqsave(&intf->vsync_lock, flags);
+ intf->vsync_timestamp = timestamp;
+ write_unlock_irqrestore(&intf->vsync_lock, flags);
+
+ wake_up_interruptible_all(&intf->vsync_wait);
+
+ event.base.type = ADF_EVENT_VSYNC;
+ event.base.length = sizeof(event);
+ event.timestamp = ktime_to_ns(timestamp);
+ adf_event_queue(&intf->base, &event.base);
+}
+EXPORT_SYMBOL(adf_vsync_notify);
+
+void adf_hotplug_notify(struct adf_interface *intf, bool connected,
+ struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+ unsigned long flags;
+ struct adf_hotplug_event event;
+ struct drm_mode_modeinfo *old_modelist;
+
+ write_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+ old_modelist = intf->modelist;
+ intf->hotplug_detect = connected;
+ intf->modelist = modelist;
+ intf->n_modes = n_modes;
+ write_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+ kfree(old_modelist);
+
+ event.base.length = sizeof(event);
+ event.base.type = ADF_EVENT_HOTPLUG;
+ event.connected = connected;
+ adf_event_queue(&intf->base, &event.base);
+}
+
+/**
+ * adf_hotplug_notify_connected - notify ADF of a display interface being
+ * connected to a display
+ *
+ * @intf: the display interface
+ * @modelist: hardware modes supported by display
+ * @n_modes: length of modelist
+ *
+ * @modelist is copied as needed, so it may point to a variable on the stack.
+ *
+ * adf_hotplug_notify_connected() may NOT be called safely from an atomic
+ * context.
+ *
+ * Returns 0 on success or error code (<0) on error.
+ */
+int adf_hotplug_notify_connected(struct adf_interface *intf,
+ struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+ struct drm_mode_modeinfo *modelist_copy;
+
+ if (n_modes > ADF_MAX_MODES)
+ return -ENOMEM;
+
+ modelist_copy = kzalloc(sizeof(modelist_copy[0]) * n_modes,
+ GFP_KERNEL);
+ if (!modelist_copy)
+ return -ENOMEM;
+ memcpy(modelist_copy, modelist, sizeof(modelist_copy[0]) * n_modes);
+
+ adf_hotplug_notify(intf, true, modelist_copy, n_modes);
+ return 0;
+}
+EXPORT_SYMBOL(adf_hotplug_notify_connected);
+
+/**
+ * adf_hotplug_notify_disconnected - notify ADF of a display interface being
+ * disconnected from a display
+ *
+ * @intf: the display interface
+ *
+ * adf_hotplug_notify_disconnected() may be called safely from an atomic
+ * context.
+ */
+void adf_hotplug_notify_disconnected(struct adf_interface *intf)
+{
+ adf_hotplug_notify(intf, false, NULL, 0);
+}
+EXPORT_SYMBOL(adf_hotplug_notify_disconnected);
+
+static int adf_obj_init(struct adf_obj *obj, enum adf_obj_type type,
+ struct idr *idr, struct adf_device *parent,
+ const struct adf_obj_ops *ops, const char *fmt, va_list args)
+{
+ int ret;
+
+ if (ops && ops->supports_event && !ops->set_event) {
+ pr_err("%s: %s implements supports_event but not set_event\n",
+ __func__, adf_obj_type_str(type));
+ return -EINVAL;
+ }
+
+ ret = idr_alloc(idr, obj, 0, 0, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("%s: allocating object id failed: %d\n", __func__, ret);
+ return ret;
+ }
+ obj->id = ret;
+
+ vscnprintf(obj->name, sizeof(obj->name), fmt, args);
+
+ obj->type = type;
+ obj->ops = ops;
+ obj->parent = parent;
+ mutex_init(&obj->event_lock);
+ obj->event_refcount = RB_ROOT;
+ spin_lock_init(&obj->file_lock);
+ INIT_LIST_HEAD(&obj->file_list);
+ return 0;
+}
+
+static void adf_obj_destroy(struct adf_obj *obj, struct idr *idr)
+{
+ struct rb_node *node = rb_first(&obj->event_refcount);
+
+ while (node) {
+ struct adf_event_refcount *refcount =
+ container_of(node, struct adf_event_refcount,
+ node);
+ kfree(refcount);
+ node = rb_first(&obj->event_refcount);
+ }
+
+ mutex_destroy(&obj->event_lock);
+ idr_remove(idr, obj->id);
+}
+
+/**
+ * adf_device_init - initialize ADF-internal data for a display device
+ * and create sysfs entries
+ *
+ * @dev: the display device
+ * @parent: the device's parent device
+ * @ops: the device's associated ops
+ * @fmt: formatting string for the display device's name
+ *
+ * @fmt specifies the device's sysfs filename and the name returned to
+ * userspace through the %ADF_GET_DEVICE_DATA ioctl.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_device_init(struct adf_device *dev, struct device *parent,
+ const struct adf_device_ops *ops, const char *fmt, ...)
+{
+ int ret;
+ va_list args;
+
+ if (!ops->validate || !ops->post) {
+ pr_err("%s: device must implement validate and post\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!ops->complete_fence && !ops->advance_timeline) {
+ if (!IS_ENABLED(CONFIG_SW_SYNC)) {
+ pr_err("%s: device requires sw_sync but it is not enabled in the kernel\n",
+ __func__);
+ return -EINVAL;
+ }
+ } else if (!(ops->complete_fence && ops->advance_timeline)) {
+ pr_err("%s: device must implement both complete_fence and advance_timeline, or implement neither\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(dev, 0, sizeof(*dev));
+
+ va_start(args, fmt);
+ ret = adf_obj_init(&dev->base, ADF_OBJ_DEVICE, &adf_devices, dev,
+ &ops->base, fmt, args);
+ va_end(args);
+ if (ret < 0)
+ return ret;
+
+ dev->dev = parent;
+ dev->ops = ops;
+ idr_init(&dev->overlay_engines);
+ idr_init(&dev->interfaces);
+ mutex_init(&dev->client_lock);
+ INIT_LIST_HEAD(&dev->post_list);
+ mutex_init(&dev->post_lock);
+ init_kthread_worker(&dev->post_worker);
+ INIT_LIST_HEAD(&dev->attached);
+ INIT_LIST_HEAD(&dev->attach_allowed);
+
+ dev->post_thread = kthread_run(kthread_worker_fn,
+ &dev->post_worker, dev->base.name);
+ if (IS_ERR(dev->post_thread)) {
+ ret = PTR_ERR(dev->post_thread);
+ dev->post_thread = NULL;
+
+ pr_err("%s: failed to run config posting thread: %d\n",
+ __func__, ret);
+ goto err;
+ }
+ init_kthread_work(&dev->post_work, adf_post_work_func);
+
+ ret = adf_device_sysfs_init(dev);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ adf_device_destroy(dev);
+ return ret;
+}
+EXPORT_SYMBOL(adf_device_init);
+
+/**
+ * adf_device_destroy - clean up ADF-internal data for a display device
+ *
+ * @dev: the display device
+ */
+void adf_device_destroy(struct adf_device *dev)
+{
+ struct adf_attachment_list *entry, *next;
+
+ idr_destroy(&dev->interfaces);
+ idr_destroy(&dev->overlay_engines);
+
+ if (dev->post_thread) {
+ flush_kthread_worker(&dev->post_worker);
+ kthread_stop(dev->post_thread);
+ }
+
+ if (dev->onscreen)
+ adf_post_cleanup(dev, dev->onscreen);
+ adf_device_sysfs_destroy(dev);
+ list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+ adf_attachment_free(entry);
+ }
+ list_for_each_entry_safe(entry, next, &dev->attached, head) {
+ adf_attachment_free(entry);
+ }
+ mutex_destroy(&dev->post_lock);
+ mutex_destroy(&dev->client_lock);
+ adf_obj_destroy(&dev->base, &adf_devices);
+}
+EXPORT_SYMBOL(adf_device_destroy);
+
+/**
+ * adf_interface_init - initialize ADF-internal data for a display interface
+ * and create sysfs entries
+ *
+ * @intf: the display interface
+ * @dev: the interface's "parent" display device
+ * @type: interface type (see enum @adf_interface_type)
+ * @idx: which interface of type @type;
+ * e.g. interface DSI.1 -> @type=%ADF_INTF_TYPE_DSI, @idx=1
+ * @flags: informational flags (bitmask of %ADF_INTF_FLAG_* values)
+ * @ops: the interface's associated ops
+ * @fmt: formatting string for the display interface's name
+ *
+ * @dev must have previously been initialized with adf_device_init().
+ *
+ * @fmt affects the name returned to userspace through the
+ * %ADF_GET_INTERFACE_DATA ioctl. It does not affect the sysfs filename,
+ * which is derived from @dev's name.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_interface_init(struct adf_interface *intf, struct adf_device *dev,
+ enum adf_interface_type type, u32 idx, u32 flags,
+ const struct adf_interface_ops *ops, const char *fmt, ...)
+{
+ int ret;
+ va_list args;
+ const u32 allowed_flags = ADF_INTF_FLAG_PRIMARY |
+ ADF_INTF_FLAG_EXTERNAL;
+
+ if (dev->n_interfaces == ADF_MAX_INTERFACES) {
+ pr_err("%s: parent device %s has too many interfaces\n",
+ __func__, dev->base.name);
+ return -ENOMEM;
+ }
+
+ if (type >= ADF_INTF_MEMORY && type <= ADF_INTF_TYPE_DEVICE_CUSTOM) {
+ pr_err("%s: invalid interface type %u\n", __func__, type);
+ return -EINVAL;
+ }
+
+ if (flags & ~allowed_flags) {
+ pr_err("%s: invalid interface flags 0x%X\n", __func__,
+ flags & ~allowed_flags);
+ return -EINVAL;
+ }
+
+ memset(intf, 0, sizeof(*intf));
+
+ va_start(args, fmt);
+ ret = adf_obj_init(&intf->base, ADF_OBJ_INTERFACE, &dev->interfaces,
+ dev, ops ? &ops->base : NULL, fmt, args);
+ va_end(args);
+ if (ret < 0)
+ return ret;
+
+ intf->type = type;
+ intf->idx = idx;
+ intf->flags = flags;
+ intf->ops = ops;
+ intf->dpms_state = DRM_MODE_DPMS_OFF;
+ init_waitqueue_head(&intf->vsync_wait);
+ rwlock_init(&intf->vsync_lock);
+ rwlock_init(&intf->hotplug_modelist_lock);
+
+ ret = adf_interface_sysfs_init(intf);
+ if (ret < 0)
+ goto err;
+ dev->n_interfaces++;
+
+ return 0;
+
+err:
+ adf_obj_destroy(&intf->base, &dev->interfaces);
+ return ret;
+}
+EXPORT_SYMBOL(adf_interface_init);
+
+/**
+ * adf_interface_destroy - clean up ADF-internal data for a display interface
+ *
+ * @intf: the display interface
+ */
+void adf_interface_destroy(struct adf_interface *intf)
+{
+ struct adf_device *dev = adf_interface_parent(intf);
+ struct adf_attachment_list *entry, *next;
+
+ mutex_lock(&dev->client_lock);
+ list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+ if (entry->attachment.interface == intf) {
+ adf_attachment_free(entry);
+ dev->n_attach_allowed--;
+ }
+ }
+ list_for_each_entry_safe(entry, next, &dev->attached, head) {
+ if (entry->attachment.interface == intf) {
+ adf_device_detach_op(dev,
+ entry->attachment.overlay_engine, intf);
+ adf_attachment_free(entry);
+ dev->n_attached--;
+ }
+ }
+ kfree(intf->modelist);
+ adf_interface_sysfs_destroy(intf);
+ adf_obj_destroy(&intf->base, &dev->interfaces);
+ dev->n_interfaces--;
+ mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_interface_destroy);
+
+static bool adf_overlay_engine_has_custom_formats(
+ const struct adf_overlay_engine_ops *ops)
+{
+ size_t i;
+ for (i = 0; i < ops->n_supported_formats; i++)
+ if (!adf_format_is_standard(ops->supported_formats[i]))
+ return true;
+ return false;
+}
+
+/**
+ * adf_overlay_engine_init - initialize ADF-internal data for an
+ * overlay engine and create sysfs entries
+ *
+ * @eng: the overlay engine
+ * @dev: the overlay engine's "parent" display device
+ * @ops: the overlay engine's associated ops
+ * @fmt: formatting string for the overlay engine's name
+ *
+ * @dev must have previously been initialized with adf_device_init().
+ *
+ * @fmt affects the name returned to userspace through the
+ * %ADF_GET_OVERLAY_ENGINE_DATA ioctl. It does not affect the sysfs filename,
+ * which is derived from @dev's name.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_overlay_engine_init(struct adf_overlay_engine *eng,
+ struct adf_device *dev,
+ const struct adf_overlay_engine_ops *ops, const char *fmt, ...)
+{
+ int ret;
+ va_list args;
+
+ if (!ops->supported_formats) {
+ pr_err("%s: overlay engine must support at least one format\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (ops->n_supported_formats > ADF_MAX_SUPPORTED_FORMATS) {
+ pr_err("%s: overlay engine supports too many formats\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (adf_overlay_engine_has_custom_formats(ops) &&
+ !dev->ops->validate_custom_format) {
+ pr_err("%s: overlay engine has custom formats but parent device %s does not implement validate_custom_format\n",
+ __func__, dev->base.name);
+ return -EINVAL;
+ }
+
+ memset(eng, 0, sizeof(*eng));
+
+ va_start(args, fmt);
+ ret = adf_obj_init(&eng->base, ADF_OBJ_OVERLAY_ENGINE,
+ &dev->overlay_engines, dev, &ops->base, fmt, args);
+ va_end(args);
+ if (ret < 0)
+ return ret;
+
+ eng->ops = ops;
+
+ ret = adf_overlay_engine_sysfs_init(eng);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ adf_obj_destroy(&eng->base, &dev->overlay_engines);
+ return ret;
+}
+EXPORT_SYMBOL(adf_overlay_engine_init);
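+
+/*
+ * Example (sketch): a minimal ops table advertising two standard fourccs.
+ * "example_formats", "example_eng_ops", and "example_dev" are hypothetical;
+ * the adf_obj_ops in .base may be left zeroed if no custom ops are needed.
+ *
+ *	static const u32 example_formats[] = {
+ *		DRM_FORMAT_XRGB8888,
+ *		DRM_FORMAT_RGB565,
+ *	};
+ *
+ *	static const struct adf_overlay_engine_ops example_eng_ops = {
+ *		.supported_formats = example_formats,
+ *		.n_supported_formats = ARRAY_SIZE(example_formats),
+ *	};
+ *
+ *	err = adf_overlay_engine_init(&example_dev->eng,
+ *			&example_dev->adf_dev, &example_eng_ops, "example-eng");
+ */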
+
+/**
+ * adf_overlay_engine_destroy - clean up ADF-internal data for an overlay
+ * engine
+ *
+ * @eng: the overlay engine
+ */
+void adf_overlay_engine_destroy(struct adf_overlay_engine *eng)
+{
+ struct adf_device *dev = adf_overlay_engine_parent(eng);
+ struct adf_attachment_list *entry, *next;
+
+ mutex_lock(&dev->client_lock);
+ list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+ if (entry->attachment.overlay_engine == eng) {
+ adf_attachment_free(entry);
+ dev->n_attach_allowed--;
+ }
+ }
+ list_for_each_entry_safe(entry, next, &dev->attached, head) {
+ if (entry->attachment.overlay_engine == eng) {
+ adf_device_detach_op(dev, eng,
+ entry->attachment.interface);
+ adf_attachment_free(entry);
+ dev->n_attached--;
+ }
+ }
+ adf_overlay_engine_sysfs_destroy(eng);
+ adf_obj_destroy(&eng->base, &dev->overlay_engines);
+ mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_overlay_engine_destroy);
+
+struct adf_attachment_list *adf_attachment_find(struct list_head *list,
+ struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+ struct adf_attachment_list *entry;
+ list_for_each_entry(entry, list, head) {
+ if (entry->attachment.interface == intf &&
+ entry->attachment.overlay_engine == eng)
+ return entry;
+ }
+ return NULL;
+}
+
+int adf_attachment_validate(struct adf_device *dev,
+ struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+ struct adf_device *intf_dev = adf_interface_parent(intf);
+ struct adf_device *eng_dev = adf_overlay_engine_parent(eng);
+
+ if (intf_dev != dev) {
+ dev_err(&dev->base.dev, "can't attach interface %s belonging to device %s\n",
+ intf->base.name, intf_dev->base.name);
+ return -EINVAL;
+ }
+
+ if (eng_dev != dev) {
+ dev_err(&dev->base.dev, "can't attach overlay engine %s belonging to device %s\n",
+ eng->base.name, eng_dev->base.name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * adf_attachment_allow - add a new entry to the list of allowed
+ * attachments
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * adf_attachment_allow() indicates that the underlying display hardware allows
+ * @intf to scan out @eng's output. It is intended to be called at
+ * driver initialization for each supported overlay engine + interface pair.
+ *
+ * Returns 0 on success, -%EALREADY if the entry already exists, or -errno on
+ * any other failure.
+ */
+int adf_attachment_allow(struct adf_device *dev,
+ struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+ int ret;
+ struct adf_attachment_list *entry = NULL;
+
+ ret = adf_attachment_validate(dev, eng, intf);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&dev->client_lock);
+
+ if (dev->n_attach_allowed == ADF_MAX_ATTACHMENTS) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ if (adf_attachment_find(&dev->attach_allowed, eng, intf)) {
+ ret = -EALREADY;
+ goto done;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ entry->attachment.interface = intf;
+ entry->attachment.overlay_engine = eng;
+ list_add_tail(&entry->head, &dev->attach_allowed);
+ dev->n_attach_allowed++;
+
+done:
+ mutex_unlock(&dev->client_lock);
+ if (ret < 0)
+ kfree(entry);
+
+ return ret;
+}
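+
+/*
+ * Example (sketch): a device with one overlay engine that can scan out to
+ * either of two interfaces would advertise both pairings during driver
+ * init; the field names below are hypothetical.
+ *
+ *	err = adf_attachment_allow(&dev->adf_dev, &dev->eng, &dev->primary);
+ *	if (err < 0 && err != -EALREADY)
+ *		return err;
+ *	err = adf_attachment_allow(&dev->adf_dev, &dev->eng, &dev->external);
+ *	if (err < 0 && err != -EALREADY)
+ *		return err;
+ */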
+
+/**
+ * adf_obj_type_str - string representation of an adf_obj_type
+ *
+ * @type: the object type
+ */
+const char *adf_obj_type_str(enum adf_obj_type type)
+{
+ switch (type) {
+ case ADF_OBJ_OVERLAY_ENGINE:
+ return "overlay engine";
+
+ case ADF_OBJ_INTERFACE:
+ return "interface";
+
+ case ADF_OBJ_DEVICE:
+ return "device";
+
+ default:
+ return "unknown";
+ }
+}
+EXPORT_SYMBOL(adf_obj_type_str);
+
+/**
+ * adf_interface_type_str - string representation of an adf_interface's type
+ *
+ * @intf: the interface
+ */
+const char *adf_interface_type_str(struct adf_interface *intf)
+{
+ switch (intf->type) {
+ case ADF_INTF_DSI:
+ return "DSI";
+
+ case ADF_INTF_eDP:
+ return "eDP";
+
+ case ADF_INTF_DPI:
+ return "DPI";
+
+ case ADF_INTF_VGA:
+ return "VGA";
+
+ case ADF_INTF_DVI:
+ return "DVI";
+
+ case ADF_INTF_HDMI:
+ return "HDMI";
+
+ case ADF_INTF_MEMORY:
+ return "memory";
+
+ default:
+ if (intf->type >= ADF_INTF_TYPE_DEVICE_CUSTOM) {
+ if (intf->ops && intf->ops->type_str)
+ return intf->ops->type_str(intf);
+ return "custom";
+ }
+ return "unknown";
+ }
+}
+EXPORT_SYMBOL(adf_interface_type_str);
+
+/**
+ * adf_event_type_str - string representation of an adf_event_type
+ *
+ * @obj: ADF object that produced the event
+ * @type: event type
+ */
+const char *adf_event_type_str(struct adf_obj *obj, enum adf_event_type type)
+{
+ switch (type) {
+ case ADF_EVENT_VSYNC:
+ return "vsync";
+
+ case ADF_EVENT_HOTPLUG:
+ return "hotplug";
+
+ default:
+ if (type >= ADF_EVENT_DEVICE_CUSTOM) {
+ if (obj->ops && obj->ops->event_type_str)
+ return obj->ops->event_type_str(obj, type);
+ return "custom";
+ }
+ return "unknown";
+ }
+}
+EXPORT_SYMBOL(adf_event_type_str);
+
+/**
+ * adf_format_str - string representation of an ADF/DRM fourcc format
+ *
+ * @format: format fourcc
+ * @buf: target buffer for the format's string representation
+ */
+void adf_format_str(u32 format, char buf[ADF_FORMAT_STR_SIZE])
+{
+ buf[0] = format & 0xFF;
+ buf[1] = (format >> 8) & 0xFF;
+ buf[2] = (format >> 16) & 0xFF;
+ buf[3] = (format >> 24) & 0xFF;
+ buf[4] = '\0';
+}
+EXPORT_SYMBOL(adf_format_str);
+
+/**
+ * adf_format_validate_yuv - validate the number and size of planes in buffers
+ * with a custom YUV format.
+ *
+ * @dev: ADF device performing the validation
+ * @buf: buffer to validate
+ * @num_planes: expected number of planes
+ * @hsub: expected horizontal chroma subsampling factor, in pixels
+ * @vsub: expected vertical chroma subsampling factor, in pixels
+ * @cpp: expected bytes per pixel for each plane (length @num_planes)
+ *
+ * adf_format_validate_yuv() is intended to be called as a helper from @dev's
+ * validate_custom_format() op.
+ *
+ * Returns 0 if @buf has the expected number of planes and each plane
+ * has sufficient size, or -EINVAL otherwise.
+ */
+int adf_format_validate_yuv(struct adf_device *dev, struct adf_buffer *buf,
+ u8 num_planes, u8 hsub, u8 vsub, u8 cpp[])
+{
+ u8 i;
+
+ if (num_planes != buf->n_planes) {
+ char format_str[ADF_FORMAT_STR_SIZE];
+ adf_format_str(buf->format, format_str);
+ dev_err(&dev->base.dev, "%u planes expected for format %s but %u planes provided\n",
+ num_planes, format_str, buf->n_planes);
+ return -EINVAL;
+ }
+
+ if (buf->w == 0 || buf->w % hsub) {
+ dev_err(&dev->base.dev, "bad buffer width %u\n", buf->w);
+ return -EINVAL;
+ }
+
+ if (buf->h == 0 || buf->h % vsub) {
+ dev_err(&dev->base.dev, "bad buffer height %u\n", buf->h);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_planes; i++) {
+ u32 width = buf->w / (i != 0 ? hsub : 1);
+ u32 height = buf->h / (i != 0 ? vsub : 1);
+
+ if (buf->pitch[i] < (u64) width * cpp[i]) {
+ dev_err(&dev->base.dev, "plane %u pitch is shorter than buffer width (pitch = %u, width = %u, bpp = %u)\n",
+ i, buf->pitch[i], width, cpp[i] * 8);
+ return -EINVAL;
+ }
+
+ if ((u64) height * buf->pitch[i] + buf->offset[i] >
+ buf->dma_bufs[i]->size) {
+ dev_err(&dev->base.dev, "plane %u buffer too small (height = %u, pitch = %u, offset = %u, size = %zu)\n",
+ i, height, buf->pitch[i],
+ buf->offset[i], buf->dma_bufs[i]->size);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(adf_format_validate_yuv);
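+
+/*
+ * Example (illustrative sketch): a validate_custom_format() op for a device
+ * whose custom format has NV12-style layout (2 planes, 2x2 chroma
+ * subsampling, 1 byte per pixel in the luma plane and 2 in the interleaved
+ * chroma plane) could simply delegate:
+ *
+ *	static int example_validate_custom_format(struct adf_device *dev,
+ *			struct adf_buffer *buf)
+ *	{
+ *		u8 cpp[] = { 1, 2 };
+ *		return adf_format_validate_yuv(dev, buf, 2, 2, 2, cpp);
+ *	}
+ */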
+
+/**
+ * adf_modeinfo_set_name - sets the name of a mode from its display resolution
+ *
+ * @mode: mode
+ *
+ * adf_modeinfo_set_name() fills in @mode->name in the format
+ * "[hdisplay]x[vdisplay](i)". It is intended to help drivers create
+ * ADF/DRM-style modelists from other mode formats.
+ */
+void adf_modeinfo_set_name(struct drm_mode_modeinfo *mode)
+{
+ bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+ mode->hdisplay, mode->vdisplay,
+ interlaced ? "i" : "");
+}
+EXPORT_SYMBOL(adf_modeinfo_set_name);
+
+/**
+ * adf_modeinfo_set_vrefresh - sets the vrefresh of a mode from its other
+ * timing data
+ *
+ * @mode: mode
+ *
+ * adf_modeinfo_set_vrefresh() calculates @mode->vrefresh from
+ * @mode->{h,v}display and @mode->flags. It is intended to help drivers
+ * create ADF/DRM-style modelists from other mode formats.
+ */
+void adf_modeinfo_set_vrefresh(struct drm_mode_modeinfo *mode)
+{
+ int refresh = 0;
+ unsigned int calc_val;
+
+ if (mode->vrefresh > 0)
+ return;
+
+ if (mode->htotal <= 0 || mode->vtotal <= 0)
+ return;
+
+ /* work out vrefresh; mode->clock is in kHz, so scale by 1000 to get Hz */
+ calc_val = (mode->clock * 1000);
+ calc_val /= mode->htotal;
+ refresh = (calc_val + mode->vtotal / 2) / mode->vtotal;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ refresh *= 2;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ refresh /= 2;
+ if (mode->vscan > 1)
+ refresh /= mode->vscan;
+
+ mode->vrefresh = refresh;
+}
+EXPORT_SYMBOL(adf_modeinfo_set_vrefresh);
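+
+/*
+ * Example (sketch): filling in a hand-built 1080p mode. With the timings
+ * below, adf_modeinfo_set_vrefresh() works out 148500 * 1000 / 2200 / 1125,
+ * rounded to 60, and adf_modeinfo_set_name() produces "1920x1080".
+ *
+ *	struct drm_mode_modeinfo mode = {
+ *		.clock = 148500,
+ *		.hdisplay = 1920, .htotal = 2200,
+ *		.vdisplay = 1080, .vtotal = 1125,
+ *	};
+ *
+ *	adf_modeinfo_set_vrefresh(&mode);
+ *	adf_modeinfo_set_name(&mode);
+ */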
+
+static int __init adf_init(void)
+{
+ int err;
+
+ err = adf_sysfs_init();
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void __exit adf_exit(void)
+{
+ adf_sysfs_destroy();
+}
+
+module_init(adf_init);
+module_exit(adf_exit);
diff --git a/drivers/video/adf/adf.h b/drivers/video/adf/adf.h
new file mode 100644
index 00000000000..3bcf1fabc23
--- /dev/null
+++ b/drivers/video/adf/adf.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_H
+#define __VIDEO_ADF_ADF_H
+
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <video/adf.h>
+#include "sync.h"
+
+struct adf_event_refcount {
+ struct rb_node node;
+ enum adf_event_type type;
+ int refcount;
+};
+
+void adf_buffer_cleanup(struct adf_buffer *buf);
+void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
+ struct adf_buffer *buf);
+void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post);
+
+struct adf_attachment_list *adf_attachment_find(struct list_head *list,
+ struct adf_overlay_engine *eng, struct adf_interface *intf);
+int adf_attachment_validate(struct adf_device *dev,
+ struct adf_overlay_engine *eng, struct adf_interface *intf);
+void adf_attachment_free(struct adf_attachment_list *attachment);
+
+struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
+ enum adf_event_type type);
+
+static inline int adf_obj_check_supports_event(struct adf_obj *obj,
+ enum adf_event_type type)
+{
+ if (!obj->ops || !obj->ops->supports_event)
+ return -EOPNOTSUPP;
+ if (!obj->ops->supports_event(obj, type))
+ return -EINVAL;
+ return 0;
+}
+
+static inline int adf_device_attach_op(struct adf_device *dev,
+ struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+ if (!dev->ops->attach)
+ return 0;
+
+ return dev->ops->attach(dev, eng, intf);
+}
+
+static inline int adf_device_detach_op(struct adf_device *dev,
+ struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+ if (!dev->ops->detach)
+ return 0;
+
+ return dev->ops->detach(dev, eng, intf);
+}
+
+#endif /* __VIDEO_ADF_ADF_H */
diff --git a/drivers/video/adf/adf_client.c b/drivers/video/adf/adf_client.c
new file mode 100644
index 00000000000..bba873d34bb
--- /dev/null
+++ b/drivers/video/adf/adf_client.c
@@ -0,0 +1,810 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "sw_sync.h"
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+#include <video/adf_format.h>
+
+#include "adf.h"
+
+static inline bool vsync_active(u8 state)
+{
+ return state == DRM_MODE_DPMS_ON || state == DRM_MODE_DPMS_STANDBY;
+}
+
+/**
+ * adf_interface_blank - set interface's DPMS state
+ *
+ * @intf: the interface
+ * @state: one of %DRM_MODE_DPMS_*
+ *
+ * Returns 0 on success, -%EBUSY if @intf is already in the requested state,
+ * or -errno on any other failure.
+ */
+int adf_interface_blank(struct adf_interface *intf, u8 state)
+{
+ struct adf_device *dev = adf_interface_parent(intf);
+ u8 prev_state;
+ bool disable_vsync;
+ bool enable_vsync;
+ int ret = 0;
+ struct adf_event_refcount *vsync_refcount;
+
+ if (!intf->ops || !intf->ops->blank)
+ return -EOPNOTSUPP;
+
+ if (state > DRM_MODE_DPMS_OFF)
+ return -EINVAL;
+
+ mutex_lock(&dev->client_lock);
+ if (state != DRM_MODE_DPMS_ON)
+ flush_kthread_worker(&dev->post_worker);
+ mutex_lock(&intf->base.event_lock);
+
+ vsync_refcount = adf_obj_find_event_refcount(&intf->base,
+ ADF_EVENT_VSYNC);
+ if (!vsync_refcount) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ prev_state = intf->dpms_state;
+ if (prev_state == state) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ disable_vsync = vsync_active(prev_state) &&
+ !vsync_active(state) &&
+ vsync_refcount->refcount;
+ enable_vsync = !vsync_active(prev_state) &&
+ vsync_active(state) &&
+ vsync_refcount->refcount;
+
+ if (disable_vsync)
+ intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+ false);
+
+ ret = intf->ops->blank(intf, state);
+ if (ret < 0) {
+ if (disable_vsync)
+ intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+ true);
+ goto done;
+ }
+
+ if (enable_vsync)
+ intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+ true);
+
+ intf->dpms_state = state;
+done:
+ mutex_unlock(&intf->base.event_lock);
+ mutex_unlock(&dev->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(adf_interface_blank);
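+
+/*
+ * Example (sketch): a client's suspend/resume path might wrap its work in
+ * DPMS transitions. A redundant call with the current state returns -EBUSY,
+ * which is usually safe to ignore.
+ *
+ *	adf_interface_blank(intf, DRM_MODE_DPMS_OFF);
+ *	... suspend work ...
+ *	adf_interface_blank(intf, DRM_MODE_DPMS_ON);
+ */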
+
+/**
+ * adf_interface_dpms_state - get interface's current DPMS state
+ *
+ * @intf: the interface
+ *
+ * Returns one of %DRM_MODE_DPMS_*.
+ */
+u8 adf_interface_dpms_state(struct adf_interface *intf)
+{
+ struct adf_device *dev = adf_interface_parent(intf);
+ u8 dpms_state;
+
+ mutex_lock(&dev->client_lock);
+ dpms_state = intf->dpms_state;
+ mutex_unlock(&dev->client_lock);
+
+ return dpms_state;
+}
+EXPORT_SYMBOL(adf_interface_dpms_state);
+
+/**
+ * adf_interface_current_mode - get interface's current display mode
+ *
+ * @intf: the interface
+ * @mode: returns the current mode
+ */
+void adf_interface_current_mode(struct adf_interface *intf,
+ struct drm_mode_modeinfo *mode)
+{
+ struct adf_device *dev = adf_interface_parent(intf);
+
+ mutex_lock(&dev->client_lock);
+ memcpy(mode, &intf->current_mode, sizeof(*mode));
+ mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_interface_current_mode);
+
+/**
+ * adf_interface_modelist - get interface's modelist
+ *
+ * @intf: the interface
+ * @modelist: storage for the modelist (optional)
+ * @n_modes: length of @modelist
+ *
+ * If @modelist is not NULL, adf_interface_modelist() will copy up to @n_modes
+ * modelist entries into @modelist.
+ *
+ * Returns the length of the modelist.
+ */
+size_t adf_interface_modelist(struct adf_interface *intf,
+ struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+ unsigned long flags;
+ size_t retval;
+
+ read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+ if (modelist)
+ memcpy(modelist, intf->modelist, sizeof(modelist[0]) *
+ min(n_modes, intf->n_modes));
+ retval = intf->n_modes;
+ read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL(adf_interface_modelist);
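+
+/*
+ * Example (sketch of the usual two-call pattern): query the length with a
+ * NULL buffer, allocate, then fetch. adf_fbdev_fill_modelist() in
+ * adf_fbdev.c uses the same pattern.
+ *
+ *	n_modes = adf_interface_modelist(intf, NULL, 0);
+ *	modelist = kzalloc(sizeof(modelist[0]) * n_modes, GFP_KERNEL);
+ *	if (modelist)
+ *		n_modes = adf_interface_modelist(intf, modelist, n_modes);
+ */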
+
+/**
+ * adf_interface_set_mode - set interface's display mode
+ *
+ * @intf: the interface
+ * @mode: the new mode
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_set_mode(struct adf_interface *intf,
+ struct drm_mode_modeinfo *mode)
+{
+ struct adf_device *dev = adf_interface_parent(intf);
+ int ret = 0;
+
+ if (!intf->ops || !intf->ops->modeset)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&dev->client_lock);
+ flush_kthread_worker(&dev->post_worker);
+
+ ret = intf->ops->modeset(intf, mode);
+ if (ret < 0)
+ goto done;
+
+ memcpy(&intf->current_mode, mode, sizeof(*mode));
+done:
+ mutex_unlock(&dev->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(adf_interface_set_mode);
+
+/**
+ * adf_interface_get_screen_size - get size of screen connected to interface
+ *
+ * @intf: the interface
+ * @width_mm: returns the screen width in mm
+ * @height_mm: returns the screen height in mm
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width_mm,
+ u16 *height_mm)
+{
+ struct adf_device *dev = adf_interface_parent(intf);
+ int ret;
+
+ if (!intf->ops || !intf->ops->screen_size)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&dev->client_lock);
+ ret = intf->ops->screen_size(intf, width_mm, height_mm);
+ mutex_unlock(&dev->client_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(adf_interface_get_screen_size);
+
+/**
+ * adf_overlay_engine_supports_format - returns whether a format is in an
+ * overlay engine's supported list
+ *
+ * @eng: the overlay engine
+ * @format: format fourcc
+ */
+bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng,
+ u32 format)
+{
+ size_t i;
+ for (i = 0; i < eng->ops->n_supported_formats; i++)
+ if (format == eng->ops->supported_formats[i])
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(adf_overlay_engine_supports_format);
+
+static int adf_buffer_validate(struct adf_buffer *buf)
+{
+ struct adf_overlay_engine *eng = buf->overlay_engine;
+ struct device *dev = &eng->base.dev;
+ struct adf_device *parent = adf_overlay_engine_parent(eng);
+ u8 hsub, vsub, num_planes, cpp[ADF_MAX_PLANES], i;
+
+ if (!adf_overlay_engine_supports_format(eng, buf->format)) {
+ char format_str[ADF_FORMAT_STR_SIZE];
+ adf_format_str(buf->format, format_str);
+ dev_err(dev, "unsupported format %s\n", format_str);
+ return -EINVAL;
+ }
+
+ if (!adf_format_is_standard(buf->format))
+ return parent->ops->validate_custom_format(parent, buf);
+
+ hsub = adf_format_horz_chroma_subsampling(buf->format);
+ vsub = adf_format_vert_chroma_subsampling(buf->format);
+ num_planes = adf_format_num_planes(buf->format);
+ for (i = 0; i < num_planes; i++)
+ cpp[i] = adf_format_plane_cpp(buf->format, i);
+
+ return adf_format_validate_yuv(parent, buf, num_planes, hsub, vsub,
+ cpp);
+}
+
+static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf,
+ struct adf_buffer_mapping *mapping)
+{
+ int ret = 0;
+ size_t i;
+
+ for (i = 0; i < buf->n_planes; i++) {
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sg_table;
+
+ attachment = dma_buf_attach(buf->dma_bufs[i], dev->dev);
+ if (IS_ERR(attachment)) {
+ ret = PTR_ERR(attachment);
+ dev_err(&dev->base.dev, "attaching plane %u failed: %d\n",
+ i, ret);
+ goto done;
+ }
+ mapping->attachments[i] = attachment;
+
+ sg_table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE);
+ if (IS_ERR(sg_table)) {
+ ret = PTR_ERR(sg_table);
+ dev_err(&dev->base.dev, "mapping plane %u failed: %d",
+ i, ret);
+ goto done;
+ } else if (!sg_table) {
+ ret = -ENOMEM;
+ dev_err(&dev->base.dev, "mapping plane %u failed\n", i);
+ goto done;
+ }
+ mapping->sg_tables[i] = sg_table;
+ }
+
+done:
+ if (ret < 0)
+ adf_buffer_mapping_cleanup(mapping, buf);
+
+ return ret;
+}
+
+static struct sync_fence *adf_sw_complete_fence(struct adf_device *dev)
+{
+ struct sync_pt *pt;
+ struct sync_fence *complete_fence;
+
+ if (!dev->timeline) {
+ dev->timeline = sw_sync_timeline_create(dev->base.name);
+ if (!dev->timeline)
+ return ERR_PTR(-ENOMEM);
+ dev->timeline_max = 1;
+ }
+
+ dev->timeline_max++;
+ pt = sw_sync_pt_create(dev->timeline, dev->timeline_max);
+ if (!pt)
+ goto err_pt_create;
+ complete_fence = sync_fence_create(dev->base.name, pt);
+ if (!complete_fence)
+ goto err_fence_create;
+
+ return complete_fence;
+
+err_fence_create:
+ sync_pt_free(pt);
+err_pt_create:
+ dev->timeline_max--;
+ return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * adf_device_post - flip to a new set of buffers
+ *
+ * @dev: device targeted by the flip
+ * @intfs: interfaces targeted by the flip
+ * @n_intfs: number of targeted interfaces
+ * @bufs: description of buffers displayed
+ * @n_bufs: number of buffers displayed
+ * @custom_data: driver-private data
+ * @custom_data_size: size of driver-private data
+ *
+ * adf_device_post() will copy @intfs, @bufs, and @custom_data, so they may
+ * point to variables on the stack. adf_device_post() also takes its own
+ * reference on each of the dma-bufs in @bufs. The adf_device_post_nocopy()
+ * variant transfers ownership of these resources to ADF instead.
+ *
+ * On success, returns a sync fence which signals when the buffers are removed
+ * from the screen. On failure, returns ERR_PTR(-errno).
+ */
+struct sync_fence *adf_device_post(struct adf_device *dev,
+ struct adf_interface **intfs, size_t n_intfs,
+ struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
+ size_t custom_data_size)
+{
+ struct adf_interface **intfs_copy = NULL;
+ struct adf_buffer *bufs_copy = NULL;
+ void *custom_data_copy = NULL;
+ struct sync_fence *ret;
+ size_t i;
+
+ intfs_copy = kzalloc(sizeof(intfs_copy[0]) * n_intfs, GFP_KERNEL);
+ if (!intfs_copy)
+ return ERR_PTR(-ENOMEM);
+
+ bufs_copy = kzalloc(sizeof(bufs_copy[0]) * n_bufs, GFP_KERNEL);
+ if (!bufs_copy) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_alloc;
+ }
+
+ custom_data_copy = kzalloc(custom_data_size, GFP_KERNEL);
+ if (!custom_data_copy) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_alloc;
+ }
+
+ for (i = 0; i < n_bufs; i++) {
+ size_t j;
+ for (j = 0; j < bufs[i].n_planes; j++)
+ get_dma_buf(bufs[i].dma_bufs[j]);
+ }
+
+ memcpy(intfs_copy, intfs, sizeof(intfs_copy[0]) * n_intfs);
+ memcpy(bufs_copy, bufs, sizeof(bufs_copy[0]) * n_bufs);
+ memcpy(custom_data_copy, custom_data, custom_data_size);
+
+ ret = adf_device_post_nocopy(dev, intfs_copy, n_intfs, bufs_copy,
+ n_bufs, custom_data_copy, custom_data_size);
+ if (IS_ERR(ret))
+ goto err_post;
+
+ return ret;
+
+err_post:
+ for (i = 0; i < n_bufs; i++) {
+ size_t j;
+ for (j = 0; j < bufs[i].n_planes; j++)
+ dma_buf_put(bufs[i].dma_bufs[j]);
+ }
+err_alloc:
+ kfree(custom_data_copy);
+ kfree(bufs_copy);
+ kfree(intfs_copy);
+ return ret;
+}
+EXPORT_SYMBOL(adf_device_post);
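+
+/*
+ * Example (sketch): flipping one buffer to one interface. "intf" and "buf"
+ * are assumed to be already configured. A client that does not care when
+ * the buffer leaves the screen can drop the fence immediately.
+ *
+ *	struct sync_fence *fence;
+ *
+ *	fence = adf_device_post(dev, &intf, 1, &buf, 1, NULL, 0);
+ *	if (IS_ERR(fence))
+ *		return PTR_ERR(fence);
+ *	sync_fence_put(fence);
+ */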
+
+/**
+ * adf_device_post_nocopy - flip to a new set of buffers
+ *
+ * adf_device_post_nocopy() has the same behavior as adf_device_post(),
+ * except ADF does not copy @intfs, @bufs, or @custom_data, and it does
+ * not take an extra reference on the dma-bufs in @bufs.
+ *
+ * @intfs, @bufs, and @custom_data must point to buffers allocated by
+ * kmalloc(). On success, ADF takes ownership of these buffers and the dma-bufs
+ * in @bufs, and will kfree()/dma_buf_put() them when they are no longer needed.
+ * On failure, adf_device_post_nocopy() does NOT take ownership of these
+ * buffers or the dma-bufs, and the caller must clean them up.
+ *
+ * adf_device_post_nocopy() is mainly intended for implementing ADF's ioctls.
+ * Clients may find the nocopy variant useful in limited cases, but most should
+ * call adf_device_post() instead.
+ */
+struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
+ struct adf_interface **intfs, size_t n_intfs,
+ struct adf_buffer *bufs, size_t n_bufs,
+ void *custom_data, size_t custom_data_size)
+{
+ struct adf_pending_post *cfg;
+ struct adf_buffer_mapping *mappings;
+ struct sync_fence *ret;
+ size_t i;
+ int err;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return ERR_PTR(-ENOMEM);
+
+ mappings = kzalloc(sizeof(mappings[0]) * n_bufs, GFP_KERNEL);
+ if (!mappings) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_alloc;
+ }
+
+ mutex_lock(&dev->client_lock);
+
+ for (i = 0; i < n_bufs; i++) {
+ err = adf_buffer_validate(&bufs[i]);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ goto err_buf;
+ }
+
+ err = adf_buffer_map(dev, &bufs[i], &mappings[i]);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ goto err_buf;
+ }
+ }
+
+ INIT_LIST_HEAD(&cfg->head);
+ cfg->config.n_bufs = n_bufs;
+ cfg->config.bufs = bufs;
+ cfg->config.mappings = mappings;
+ cfg->config.custom_data = custom_data;
+ cfg->config.custom_data_size = custom_data_size;
+
+ err = dev->ops->validate(dev, &cfg->config, &cfg->state);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ goto err_buf;
+ }
+
+ mutex_lock(&dev->post_lock);
+
+ if (dev->ops->complete_fence)
+ ret = dev->ops->complete_fence(dev, &cfg->config,
+ cfg->state);
+ else
+ ret = adf_sw_complete_fence(dev);
+
+ if (IS_ERR(ret))
+ goto err_fence;
+
+ list_add_tail(&cfg->head, &dev->post_list);
+ queue_kthread_work(&dev->post_worker, &dev->post_work);
+ mutex_unlock(&dev->post_lock);
+ mutex_unlock(&dev->client_lock);
+ kfree(intfs);
+ return ret;
+
+err_fence:
+ mutex_unlock(&dev->post_lock);
+
+err_buf:
+ for (i = 0; i < n_bufs; i++)
+ adf_buffer_mapping_cleanup(&mappings[i], &bufs[i]);
+
+ mutex_unlock(&dev->client_lock);
+ kfree(mappings);
+
+err_alloc:
+ kfree(cfg);
+ return ret;
+}
+EXPORT_SYMBOL(adf_device_post_nocopy);
+
+static void adf_attachment_list_to_array(struct adf_device *dev,
+ struct list_head *src, struct adf_attachment *dst, size_t size)
+{
+ struct adf_attachment_list *entry;
+ size_t i = 0;
+
+ if (!dst)
+ return;
+
+ list_for_each_entry(entry, src, head) {
+ if (i == size)
+ return;
+ dst[i] = entry->attachment;
+ i++;
+ }
+}
+
+/**
+ * adf_device_attachments - get device's list of active attachments
+ *
+ * @dev: the device
+ * @attachments: storage for the attachment list (optional)
+ * @n_attachments: length of @attachments
+ *
+ * If @attachments is not NULL, adf_device_attachments() will copy up to
+ * @n_attachments entries into @attachments.
+ *
+ * Returns the length of the active attachment list.
+ */
+size_t adf_device_attachments(struct adf_device *dev,
+ struct adf_attachment *attachments, size_t n_attachments)
+{
+ size_t retval;
+
+ mutex_lock(&dev->client_lock);
+ adf_attachment_list_to_array(dev, &dev->attached, attachments,
+ n_attachments);
+ retval = dev->n_attached;
+ mutex_unlock(&dev->client_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(adf_device_attachments);
+
+/**
+ * adf_device_attachments_allowed - get device's list of allowed attachments
+ *
+ * @dev: the device
+ * @attachments: storage for the attachment list (optional)
+ * @n_attachments: length of @attachments
+ *
+ * If @attachments is not NULL, adf_device_attachments_allowed() will copy up to
+ * @n_attachments entries into @attachments.
+ *
+ * Returns the length of the allowed attachment list.
+ */
+size_t adf_device_attachments_allowed(struct adf_device *dev,
+ struct adf_attachment *attachments, size_t n_attachments)
+{
+ size_t retval;
+
+ mutex_lock(&dev->client_lock);
+ adf_attachment_list_to_array(dev, &dev->attach_allowed, attachments,
+ n_attachments);
+ retval = dev->n_attach_allowed;
+ mutex_unlock(&dev->client_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(adf_device_attachments_allowed);
+
+/**
+ * adf_device_attached - return whether an overlay engine and interface are
+ * attached
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ */
+bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng,
+ struct adf_interface *intf)
+{
+ struct adf_attachment_list *attachment;
+
+ mutex_lock(&dev->client_lock);
+ attachment = adf_attachment_find(&dev->attached, eng, intf);
+ mutex_unlock(&dev->client_lock);
+
+ return attachment != NULL;
+}
+EXPORT_SYMBOL(adf_device_attached);
+
+/**
+ * adf_device_attach_allowed - return whether the ADF device supports attaching
+ * an overlay engine and interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ */
+bool adf_device_attach_allowed(struct adf_device *dev,
+ struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+ struct adf_attachment_list *attachment;
+
+ mutex_lock(&dev->client_lock);
+ attachment = adf_attachment_find(&dev->attach_allowed, eng, intf);
+ mutex_unlock(&dev->client_lock);
+
+ return attachment != NULL;
+}
+EXPORT_SYMBOL(adf_device_attach_allowed);
+
+/**
+ * adf_device_attach - attach an overlay engine to an interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * Returns 0 on success, -%EINVAL if attaching @intf and @eng is not allowed,
+ * -%EALREADY if @intf and @eng are already attached, or -errno on any other
+ * failure.
+ */
+int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng,
+ struct adf_interface *intf)
+{
+ int ret;
+ struct adf_attachment_list *attachment = NULL;
+
+ ret = adf_attachment_validate(dev, eng, intf);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&dev->client_lock);
+
+ if (dev->n_attached == ADF_MAX_ATTACHMENTS) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ if (!adf_attachment_find(&dev->attach_allowed, eng, intf)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (adf_attachment_find(&dev->attached, eng, intf)) {
+ ret = -EALREADY;
+ goto done;
+ }
+
+ ret = adf_device_attach_op(dev, eng, intf);
+ if (ret < 0)
+ goto done;
+
+ attachment = kzalloc(sizeof(*attachment), GFP_KERNEL);
+ if (!attachment) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ attachment->attachment.interface = intf;
+ attachment->attachment.overlay_engine = eng;
+ list_add_tail(&attachment->head, &dev->attached);
+ dev->n_attached++;
+
+done:
+ mutex_unlock(&dev->client_lock);
+ if (ret < 0)
+ kfree(attachment);
+
+ return ret;
+}
+EXPORT_SYMBOL(adf_device_attach);
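+
+/*
+ * Example (sketch): attaching at runtime while tolerating an attachment
+ * that already exists; adf_fbdev_open() in adf_fbdev.c uses the same idiom.
+ *
+ *	err = adf_device_attach(dev, eng, intf);
+ *	if (err < 0 && err != -EALREADY)
+ *		return err;
+ */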
+
+/**
+ * adf_device_detach - detach an overlay engine from an interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * Returns 0 on success, -%EINVAL if @intf and @eng are not attached,
+ * or -errno on any other failure.
+ */
+int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng,
+ struct adf_interface *intf)
+{
+ int ret;
+ struct adf_attachment_list *attachment;
+
+ ret = adf_attachment_validate(dev, eng, intf);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&dev->client_lock);
+
+ attachment = adf_attachment_find(&dev->attached, eng, intf);
+ if (!attachment) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = adf_device_detach_op(dev, eng, intf);
+ if (ret < 0)
+ goto done;
+
+ adf_attachment_free(attachment);
+ dev->n_attached--;
+done:
+ mutex_unlock(&dev->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(adf_device_detach);
+
+/**
+ * adf_interface_simple_buffer_alloc - allocate a simple buffer
+ *
+ * @intf: target interface
+ * @w: width in pixels
+ * @h: height in pixels
+ * @format: format fourcc
+ * @dma_buf: returns the allocated buffer
+ * @offset: returns the byte offset of the allocated buffer's first pixel
+ * @pitch: returns the allocated buffer's pitch
+ *
+ * See &struct adf_simple_buffer_alloc for a description of simple buffers and
+ * their limitations.
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h,
+ u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch)
+{
+ if (!intf->ops || !intf->ops->alloc_simple_buffer)
+ return -EOPNOTSUPP;
+
+ if (!adf_format_is_rgb(format))
+ return -EINVAL;
+
+ return intf->ops->alloc_simple_buffer(intf, w, h, format, dma_buf,
+ offset, pitch);
+}
+EXPORT_SYMBOL(adf_interface_simple_buffer_alloc);
+
+/**
+ * adf_interface_simple_post - flip to a single buffer
+ *
+ * @intf: interface targeted by the flip
+ * @buf: buffer to display
+ *
+ * adf_interface_simple_post() can be used generically for simple display
+ * configurations, since the client does not need to provide any driver-private
+ * configuration data.
+ *
+ * adf_interface_simple_post() has the same copying semantics as
+ * adf_device_post().
+ *
+ * On success, returns a sync fence which signals when the buffer is removed
+ * from the screen. On failure, returns ERR_PTR(-errno).
+ */
+struct sync_fence *adf_interface_simple_post(struct adf_interface *intf,
+ struct adf_buffer *buf)
+{
+ size_t custom_data_size = 0;
+ void *custom_data = NULL;
+ struct sync_fence *ret;
+
+ if (intf->ops && intf->ops->describe_simple_post) {
+ int err;
+
+ custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
+ if (!custom_data) {
+ ret = ERR_PTR(-ENOMEM);
+ goto done;
+ }
+
+ err = intf->ops->describe_simple_post(intf, buf, custom_data,
+ &custom_data_size);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ goto done;
+ }
+ }
+
+ ret = adf_device_post(adf_interface_parent(intf), &intf, 1, buf, 1,
+ custom_data, custom_data_size);
+done:
+ kfree(custom_data);
+ return ret;
+}
+EXPORT_SYMBOL(adf_interface_simple_post);
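+
+/*
+ * Example (sketch): allocating a simple buffer sized for the interface's
+ * current mode and flipping to it. "intf" and "eng" are assumed to be
+ * already initialized; cleanup of the dma-buf on later errors is elided.
+ *
+ *	struct drm_mode_modeinfo mode;
+ *	struct adf_buffer buf;
+ *	struct sync_fence *fence;
+ *
+ *	adf_interface_current_mode(intf, &mode);
+ *
+ *	memset(&buf, 0, sizeof(buf));
+ *	buf.overlay_engine = eng;
+ *	buf.w = mode.hdisplay;
+ *	buf.h = mode.vdisplay;
+ *	buf.format = DRM_FORMAT_XRGB8888;
+ *	buf.n_planes = 1;
+ *	err = adf_interface_simple_buffer_alloc(intf, buf.w, buf.h,
+ *			buf.format, &buf.dma_bufs[0], &buf.offset[0],
+ *			&buf.pitch[0]);
+ *	if (err < 0)
+ *		return err;
+ *
+ *	fence = adf_interface_simple_post(intf, &buf);
+ *	if (!IS_ERR(fence))
+ *		sync_fence_put(fence);
+ */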
diff --git a/drivers/video/adf/adf_fbdev.c b/drivers/video/adf/adf_fbdev.c
new file mode 100644
index 00000000000..cac34d14cbc
--- /dev/null
+++ b/drivers/video/adf/adf_fbdev.c
@@ -0,0 +1,651 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/vmalloc.h>
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+#include <video/adf_fbdev.h>
+#include <video/adf_format.h>
+
+#include "adf.h"
+
+struct adf_fbdev_format {
+ u32 fourcc;
+ u32 bpp;
+ u32 r_length;
+ u32 g_length;
+ u32 b_length;
+ u32 a_length;
+ u32 r_offset;
+ u32 g_offset;
+ u32 b_offset;
+ u32 a_offset;
+};
+
+static const struct adf_fbdev_format format_table[] = {
+ {DRM_FORMAT_RGB332, 8, 3, 3, 2, 0, 5, 2, 0, 0},
+ {DRM_FORMAT_BGR233, 8, 3, 3, 2, 0, 0, 3, 5, 0},
+
+ {DRM_FORMAT_XRGB4444, 16, 4, 4, 4, 0, 8, 4, 0, 0},
+ {DRM_FORMAT_XBGR4444, 16, 4, 4, 4, 0, 0, 4, 8, 0},
+ {DRM_FORMAT_RGBX4444, 16, 4, 4, 4, 0, 12, 8, 4, 0},
+ {DRM_FORMAT_BGRX4444, 16, 4, 4, 4, 0, 0, 4, 8, 0},
+
+ {DRM_FORMAT_ARGB4444, 16, 4, 4, 4, 4, 8, 4, 0, 12},
+ {DRM_FORMAT_ABGR4444, 16, 4, 4, 4, 4, 0, 4, 8, 12},
+ {DRM_FORMAT_RGBA4444, 16, 4, 4, 4, 4, 12, 8, 4, 0},
+ {DRM_FORMAT_BGRA4444, 16, 4, 4, 4, 4, 0, 4, 8, 0},
+
+ {DRM_FORMAT_XRGB1555, 16, 5, 5, 5, 0, 10, 5, 0, 0},
+ {DRM_FORMAT_XBGR1555, 16, 5, 5, 5, 0, 0, 5, 10, 0},
+ {DRM_FORMAT_RGBX5551, 16, 5, 5, 5, 0, 11, 6, 1, 0},
+ {DRM_FORMAT_BGRX5551, 16, 5, 5, 5, 0, 1, 6, 11, 0},
+
+ {DRM_FORMAT_ARGB1555, 16, 5, 5, 5, 1, 10, 5, 0, 15},
+ {DRM_FORMAT_ABGR1555, 16, 5, 5, 5, 1, 0, 5, 10, 15},
+ {DRM_FORMAT_RGBA5551, 16, 5, 5, 5, 1, 11, 6, 1, 0},
+ {DRM_FORMAT_BGRA5551, 16, 5, 5, 5, 1, 1, 6, 11, 0},
+
+ {DRM_FORMAT_RGB565, 16, 5, 6, 5, 0, 11, 5, 0, 0},
+ {DRM_FORMAT_BGR565, 16, 5, 6, 5, 0, 0, 5, 11, 0},
+
+ {DRM_FORMAT_RGB888, 24, 8, 8, 8, 0, 16, 8, 0, 0},
+ {DRM_FORMAT_BGR888, 24, 8, 8, 8, 0, 0, 8, 16, 0},
+
+ {DRM_FORMAT_XRGB8888, 32, 8, 8, 8, 0, 16, 8, 0, 0},
+ {DRM_FORMAT_XBGR8888, 32, 8, 8, 8, 0, 0, 8, 16, 0},
+ {DRM_FORMAT_RGBX8888, 32, 8, 8, 8, 0, 24, 16, 8, 0},
+ {DRM_FORMAT_BGRX8888, 32, 8, 8, 8, 0, 8, 16, 24, 0},
+
+ {DRM_FORMAT_ARGB8888, 32, 8, 8, 8, 8, 16, 8, 0, 24},
+ {DRM_FORMAT_ABGR8888, 32, 8, 8, 8, 8, 0, 8, 16, 24},
+ {DRM_FORMAT_RGBA8888, 32, 8, 8, 8, 8, 24, 16, 8, 0},
+ {DRM_FORMAT_BGRA8888, 32, 8, 8, 8, 8, 8, 16, 24, 0},
+
+ {DRM_FORMAT_XRGB2101010, 32, 10, 10, 10, 0, 20, 10, 0, 0},
+ {DRM_FORMAT_XBGR2101010, 32, 10, 10, 10, 0, 0, 10, 20, 0},
+ {DRM_FORMAT_RGBX1010102, 32, 10, 10, 10, 0, 22, 12, 2, 0},
+ {DRM_FORMAT_BGRX1010102, 32, 10, 10, 10, 0, 2, 12, 22, 0},
+
+ {DRM_FORMAT_ARGB2101010, 32, 10, 10, 10, 2, 20, 10, 0, 30},
+ {DRM_FORMAT_ABGR2101010, 32, 10, 10, 10, 2, 0, 10, 20, 30},
+ {DRM_FORMAT_RGBA1010102, 32, 10, 10, 10, 2, 22, 12, 2, 0},
+ {DRM_FORMAT_BGRA1010102, 32, 10, 10, 10, 2, 2, 12, 22, 0},
+};
+
+static u32 drm_fourcc_from_fb_var(struct fb_var_screeninfo *var)
+{
+ size_t i;
+ for (i = 0; i < ARRAY_SIZE(format_table); i++) {
+ const struct adf_fbdev_format *f = &format_table[i];
+ if (var->red.length == f->r_length &&
+ var->red.offset == f->r_offset &&
+ var->green.length == f->g_length &&
+ var->green.offset == f->g_offset &&
+ var->blue.length == f->b_length &&
+ var->blue.offset == f->b_offset &&
+ var->transp.length == f->a_length &&
+ (var->transp.length == 0 ||
+ var->transp.offset == f->a_offset))
+ return f->fourcc;
+ }
+
+ return 0;
+}
+
+static const struct adf_fbdev_format *fbdev_format_info(u32 format)
+{
+ size_t i;
+ for (i = 0; i < ARRAY_SIZE(format_table); i++) {
+ const struct adf_fbdev_format *f = &format_table[i];
+ if (f->fourcc == format)
+ return f;
+ }
+
+ BUG();
+}
+
+void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
+ struct fb_videomode *vmode)
+{
+ memset(vmode, 0, sizeof(*vmode));
+
+ vmode->refresh = mode->vrefresh;
+
+ vmode->xres = mode->hdisplay;
+ vmode->yres = mode->vdisplay;
+
+ vmode->pixclock = mode->clock ? KHZ2PICOS(mode->clock) : 0;
+ vmode->left_margin = mode->htotal - mode->hsync_end;
+ vmode->right_margin = mode->hsync_start - mode->hdisplay;
+ vmode->upper_margin = mode->vtotal - mode->vsync_end;
+ vmode->lower_margin = mode->vsync_start - mode->vdisplay;
+ vmode->hsync_len = mode->hsync_end - mode->hsync_start;
+ vmode->vsync_len = mode->vsync_end - mode->vsync_start;
+
+ vmode->sync = 0;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ vmode->sync |= FB_SYNC_HOR_HIGH_ACT;
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ vmode->sync |= FB_SYNC_VERT_HIGH_ACT;
+ if (mode->flags & DRM_MODE_FLAG_PCSYNC)
+ vmode->sync |= FB_SYNC_COMP_HIGH_ACT;
+ if (mode->flags & DRM_MODE_FLAG_BCAST)
+ vmode->sync |= FB_SYNC_BROADCAST;
+
+ vmode->vmode = 0;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vmode->vmode |= FB_VMODE_INTERLACED;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ vmode->vmode |= FB_VMODE_DOUBLE;
+}
+EXPORT_SYMBOL(adf_modeinfo_to_fb_videomode);
+
+void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
+ struct drm_mode_modeinfo *mode)
+{
+ memset(mode, 0, sizeof(*mode));
+
+ mode->hdisplay = vmode->xres;
+ mode->hsync_start = mode->hdisplay + vmode->right_margin;
+ mode->hsync_end = mode->hsync_start + vmode->hsync_len;
+ mode->htotal = mode->hsync_end + vmode->left_margin;
+
+ mode->vdisplay = vmode->yres;
+ mode->vsync_start = mode->vdisplay + vmode->lower_margin;
+ mode->vsync_end = mode->vsync_start + vmode->vsync_len;
+ mode->vtotal = mode->vsync_end + vmode->upper_margin;
+
+ mode->clock = vmode->pixclock ? PICOS2KHZ(vmode->pixclock) : 0;
+
+ mode->flags = 0;
+ if (vmode->sync & FB_SYNC_HOR_HIGH_ACT)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ if (vmode->sync & FB_SYNC_VERT_HIGH_ACT)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ if (vmode->sync & FB_SYNC_COMP_HIGH_ACT)
+ mode->flags |= DRM_MODE_FLAG_PCSYNC;
+ if (vmode->sync & FB_SYNC_BROADCAST)
+ mode->flags |= DRM_MODE_FLAG_BCAST;
+ if (vmode->vmode & FB_VMODE_INTERLACED)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (vmode->vmode & FB_VMODE_DOUBLE)
+ mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+ if (vmode->refresh)
+ mode->vrefresh = vmode->refresh;
+ else
+ adf_modeinfo_set_vrefresh(mode);
+
+ if (vmode->name)
+ strlcpy(mode->name, vmode->name, sizeof(mode->name));
+ else
+ adf_modeinfo_set_name(mode);
+}
+EXPORT_SYMBOL(adf_modeinfo_from_fb_videomode);
+
+static int adf_fbdev_post(struct adf_fbdev *fbdev)
+{
+ struct adf_buffer buf;
+ struct sync_fence *complete_fence;
+ int ret = 0;
+
+ memset(&buf, 0, sizeof(buf));
+ buf.overlay_engine = fbdev->eng;
+ buf.w = fbdev->info->var.xres;
+ buf.h = fbdev->info->var.yres;
+ buf.format = fbdev->format;
+ buf.dma_bufs[0] = fbdev->dma_buf;
+ buf.offset[0] = fbdev->offset +
+ fbdev->info->var.yoffset * fbdev->pitch +
+ fbdev->info->var.xoffset *
+ (fbdev->info->var.bits_per_pixel / 8);
+ buf.pitch[0] = fbdev->pitch;
+ buf.n_planes = 1;
+
+ complete_fence = adf_interface_simple_post(fbdev->intf, &buf);
+ if (IS_ERR(complete_fence)) {
+ ret = PTR_ERR(complete_fence);
+ goto done;
+ }
+
+ sync_fence_put(complete_fence);
+done:
+ return ret;
+}
+
+static const u16 vga_palette[][3] = {
+ {0x0000, 0x0000, 0x0000},
+ {0x0000, 0x0000, 0xAAAA},
+ {0x0000, 0xAAAA, 0x0000},
+ {0x0000, 0xAAAA, 0xAAAA},
+ {0xAAAA, 0x0000, 0x0000},
+ {0xAAAA, 0x0000, 0xAAAA},
+ {0xAAAA, 0x5555, 0x0000},
+ {0xAAAA, 0xAAAA, 0xAAAA},
+ {0x5555, 0x5555, 0x5555},
+ {0x5555, 0x5555, 0xFFFF},
+ {0x5555, 0xFFFF, 0x5555},
+ {0x5555, 0xFFFF, 0xFFFF},
+ {0xFFFF, 0x5555, 0x5555},
+ {0xFFFF, 0x5555, 0xFFFF},
+ {0xFFFF, 0xFFFF, 0x5555},
+ {0xFFFF, 0xFFFF, 0xFFFF},
+};
+
+static int adf_fb_alloc(struct adf_fbdev *fbdev)
+{
+ int ret;
+
+ ret = adf_interface_simple_buffer_alloc(fbdev->intf,
+ fbdev->default_xres_virtual,
+ fbdev->default_yres_virtual,
+ fbdev->default_format,
+ &fbdev->dma_buf, &fbdev->offset, &fbdev->pitch);
+ if (ret < 0) {
+ dev_err(fbdev->info->dev, "allocating fb failed: %d\n", ret);
+ return ret;
+ }
+
+ fbdev->vaddr = dma_buf_vmap(fbdev->dma_buf);
+ if (!fbdev->vaddr) {
+ ret = -ENOMEM;
+ dev_err(fbdev->info->dev, "vmapping fb failed\n");
+ goto err_vmap;
+ }
+ fbdev->info->fix.line_length = fbdev->pitch;
+ fbdev->info->var.xres_virtual = fbdev->default_xres_virtual;
+ fbdev->info->var.yres_virtual = fbdev->default_yres_virtual;
+ fbdev->info->fix.smem_len = fbdev->dma_buf->size;
+ fbdev->info->screen_base = fbdev->vaddr;
+
+ return 0;
+
+err_vmap:
+ dma_buf_put(fbdev->dma_buf);
+ return ret;
+}
+
+static void adf_fb_destroy(struct adf_fbdev *fbdev)
+{
+ dma_buf_vunmap(fbdev->dma_buf, fbdev->vaddr);
+ dma_buf_put(fbdev->dma_buf);
+}
+
+static void adf_fbdev_set_format(struct adf_fbdev *fbdev, u32 format)
+{
+ size_t i;
+ const struct adf_fbdev_format *info = fbdev_format_info(format);
+ for (i = 0; i < ARRAY_SIZE(vga_palette); i++) {
+ u16 r = vga_palette[i][0];
+ u16 g = vga_palette[i][1];
+ u16 b = vga_palette[i][2];
+
+ r >>= (16 - info->r_length);
+ g >>= (16 - info->g_length);
+ b >>= (16 - info->b_length);
+
+ fbdev->pseudo_palette[i] =
+ (r << info->r_offset) |
+ (g << info->g_offset) |
+ (b << info->b_offset);
+
+ if (info->a_length) {
+ u16 a = BIT(info->a_length) - 1;
+ fbdev->pseudo_palette[i] |= (a << info->a_offset);
+ }
+ }
+
+ fbdev->info->var.bits_per_pixel = adf_format_bpp(format);
+ fbdev->info->var.red.length = info->r_length;
+ fbdev->info->var.red.offset = info->r_offset;
+ fbdev->info->var.green.length = info->g_length;
+ fbdev->info->var.green.offset = info->g_offset;
+ fbdev->info->var.blue.length = info->b_length;
+ fbdev->info->var.blue.offset = info->b_offset;
+ fbdev->info->var.transp.length = info->a_length;
+ fbdev->info->var.transp.offset = info->a_offset;
+ fbdev->format = format;
+}
+
+static void adf_fbdev_fill_modelist(struct adf_fbdev *fbdev)
+{
+ struct drm_mode_modeinfo *modelist;
+ struct fb_videomode fbmode;
+ size_t n_modes, i;
+ int ret = 0;
+
+ n_modes = adf_interface_modelist(fbdev->intf, NULL, 0);
+ modelist = kzalloc(sizeof(modelist[0]) * n_modes, GFP_KERNEL);
+ if (!modelist) {
+ dev_warn(fbdev->info->dev, "allocating new modelist failed; keeping old modelist\n");
+ return;
+ }
+ adf_interface_modelist(fbdev->intf, modelist, n_modes);
+
+ fb_destroy_modelist(&fbdev->info->modelist);
+
+ for (i = 0; i < n_modes; i++) {
+ adf_modeinfo_to_fb_videomode(&modelist[i], &fbmode);
+ ret = fb_add_videomode(&fbmode, &fbdev->info->modelist);
+ if (ret < 0)
+ dev_warn(fbdev->info->dev, "adding mode %s to modelist failed: %d\n",
+ modelist[i].name, ret);
+ }
+
+ kfree(modelist);
+}
+
+/**
+ * adf_fbdev_open - default implementation of fbdev open op
+ */
+int adf_fbdev_open(struct fb_info *info, int user)
+{
+ struct adf_fbdev *fbdev = info->par;
+ int ret;
+
+ if (!fbdev->open) {
+ struct drm_mode_modeinfo mode;
+ struct fb_videomode fbmode;
+ struct adf_device *dev = adf_interface_parent(fbdev->intf);
+
+ ret = adf_device_attach(dev, fbdev->eng, fbdev->intf);
+ if (ret < 0 && ret != -EALREADY)
+ return ret;
+
+ ret = adf_fb_alloc(fbdev);
+ if (ret < 0)
+ return ret;
+
+ adf_interface_current_mode(fbdev->intf, &mode);
+ adf_modeinfo_to_fb_videomode(&mode, &fbmode);
+ fb_videomode_to_var(&fbdev->info->var, &fbmode);
+
+ adf_fbdev_set_format(fbdev, fbdev->default_format);
+ adf_fbdev_fill_modelist(fbdev);
+ }
+
+ ret = adf_fbdev_post(fbdev);
+ if (ret < 0) {
+ if (!fbdev->open)
+ adf_fb_destroy(fbdev);
+ return ret;
+ }
+
+ fbdev->open = true;
+ return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_open);
+
+/**
+ * adf_fbdev_release - default implementation of fbdev release op
+ */
+int adf_fbdev_release(struct fb_info *info, int user)
+{
+ struct adf_fbdev *fbdev = info->par;
+ adf_fb_destroy(fbdev);
+ fbdev->open = false;
+ return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_release);
+
+/**
+ * adf_fbdev_check_var - default implementation of fbdev check_var op
+ */
+int adf_fbdev_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct adf_fbdev *fbdev = info->par;
+ bool valid_format = true;
+ u32 format = drm_fourcc_from_fb_var(var);
+ u32 pitch = var->xres_virtual * var->bits_per_pixel / 8;
+
+ if (!format) {
+ dev_dbg(info->dev, "%s: unrecognized format\n", __func__);
+ valid_format = false;
+ }
+
+ if (valid_format && var->grayscale) {
+ dev_dbg(info->dev, "%s: grayscale modes not supported\n",
+ __func__);
+ valid_format = false;
+ }
+
+ if (valid_format && var->nonstd) {
+ dev_dbg(info->dev, "%s: nonstandard formats not supported\n",
+ __func__);
+ valid_format = false;
+ }
+
+ if (valid_format && !adf_overlay_engine_supports_format(fbdev->eng,
+ format)) {
+ char format_str[ADF_FORMAT_STR_SIZE];
+ adf_format_str(format, format_str);
+ dev_dbg(info->dev, "%s: format %s not supported by overlay engine %s\n",
+ __func__, format_str, fbdev->eng->base.name);
+ valid_format = false;
+ }
+
+ if (valid_format && pitch > fbdev->pitch) {
+ dev_dbg(info->dev, "%s: fb pitch too small for var (pitch = %u, xres_virtual = %u, bits_per_pixel = %u)\n",
+ __func__, fbdev->pitch, var->xres_virtual,
+ var->bits_per_pixel);
+ valid_format = false;
+ }
+
+ if (valid_format && var->yres_virtual > fbdev->default_yres_virtual) {
+ dev_dbg(info->dev, "%s: fb height too small for var (h = %u, yres_virtual = %u)\n",
+ __func__, fbdev->default_yres_virtual,
+ var->yres_virtual);
+ valid_format = false;
+ }
+
+ if (valid_format) {
+ var->activate = info->var.activate;
+ var->height = info->var.height;
+ var->width = info->var.width;
+ var->accel_flags = info->var.accel_flags;
+ var->rotate = info->var.rotate;
+ var->colorspace = info->var.colorspace;
+ /* userspace can't change these */
+ } else {
+ /*
+ * if any part of the format is invalid then fixing it up is
+ * impractical, so save just the modesetting bits and
+ * overwrite everything else
+ */
+ struct fb_videomode mode;
+ fb_var_to_videomode(&mode, var);
+ memcpy(var, &info->var, sizeof(*var));
+ fb_videomode_to_var(var, &mode);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_check_var);
+
+/**
+ * adf_fbdev_set_par - default implementation of fbdev set_par op
+ */
+int adf_fbdev_set_par(struct fb_info *info)
+{
+ struct adf_fbdev *fbdev = info->par;
+ struct adf_interface *intf = fbdev->intf;
+ struct fb_videomode vmode;
+ struct drm_mode_modeinfo mode;
+ int ret;
+ u32 format = drm_fourcc_from_fb_var(&info->var);
+
+ fb_var_to_videomode(&vmode, &info->var);
+ adf_modeinfo_from_fb_videomode(&vmode, &mode);
+ ret = adf_interface_set_mode(intf, &mode);
+ if (ret < 0)
+ return ret;
+
+ ret = adf_fbdev_post(fbdev);
+ if (ret < 0)
+ return ret;
+
+ if (format != fbdev->format)
+ adf_fbdev_set_format(fbdev, format);
+
+ return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_set_par);
+
+/**
+ * adf_fbdev_blank - default implementation of fbdev blank op
+ */
+int adf_fbdev_blank(int blank, struct fb_info *info)
+{
+ struct adf_fbdev *fbdev = info->par;
+ struct adf_interface *intf = fbdev->intf;
+ u8 dpms_state;
+
+ switch (blank) {
+ case FB_BLANK_UNBLANK:
+ dpms_state = DRM_MODE_DPMS_ON;
+ break;
+ case FB_BLANK_NORMAL:
+ dpms_state = DRM_MODE_DPMS_STANDBY;
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ dpms_state = DRM_MODE_DPMS_SUSPEND;
+ break;
+ case FB_BLANK_HSYNC_SUSPEND:
+ dpms_state = DRM_MODE_DPMS_STANDBY;
+ break;
+ case FB_BLANK_POWERDOWN:
+ dpms_state = DRM_MODE_DPMS_OFF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return adf_interface_blank(intf, dpms_state);
+}
+EXPORT_SYMBOL(adf_fbdev_blank);
+
+/**
+ * adf_fbdev_pan_display - default implementation of fbdev pan_display op
+ */
+int adf_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct adf_fbdev *fbdev = info->par;
+ return adf_fbdev_post(fbdev);
+}
+EXPORT_SYMBOL(adf_fbdev_pan_display);
+
+/**
+ * adf_fbdev_mmap - default implementation of fbdev mmap op
+ */
+int adf_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct adf_fbdev *fbdev = info->par;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ return dma_buf_mmap(fbdev->dma_buf, vma, 0);
+}
+EXPORT_SYMBOL(adf_fbdev_mmap);
+
+/**
+ * adf_fbdev_init - initialize helper to wrap ADF device in fbdev API
+ *
+ * @fbdev: the fbdev helper
+ * @interface: the ADF interface that will display the framebuffer
+ * @eng: the ADF overlay engine that will scan out the framebuffer
+ * @xres_virtual: the virtual width of the framebuffer
+ * @yres_virtual: the virtual height of the framebuffer
+ * @format: the format of the framebuffer
+ * @fbops: the device's fbdev ops
+ * @fmt: formatting for the framebuffer identification string
+ * @...: variable arguments
+ *
+ * @format must be a standard, non-indexed RGB format, i.e.,
+ * adf_format_is_rgb(@format) && @format != %DRM_FORMAT_C8.
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_fbdev_init(struct adf_fbdev *fbdev, struct adf_interface *interface,
+ struct adf_overlay_engine *eng,
+ u16 xres_virtual, u16 yres_virtual, u32 format,
+ struct fb_ops *fbops, const char *fmt, ...)
+{
+ struct adf_device *parent = adf_interface_parent(interface);
+ struct device *dev = &parent->base.dev;
+ u16 width_mm, height_mm;
+ va_list args;
+ int ret;
+
+ if (!adf_format_is_rgb(format) ||
+ format == DRM_FORMAT_C8) {
+ dev_err(dev, "fbdev helper does not support format %u\n",
+ format);
+ return -EINVAL;
+ }
+
+ memset(fbdev, 0, sizeof(*fbdev));
+ fbdev->intf = interface;
+ fbdev->eng = eng;
+ fbdev->info = framebuffer_alloc(0, dev);
+ if (!fbdev->info) {
+ dev_err(dev, "allocating framebuffer device failed\n");
+ return -ENOMEM;
+ }
+ fbdev->default_xres_virtual = xres_virtual;
+ fbdev->default_yres_virtual = yres_virtual;
+ fbdev->default_format = format;
+
+ fbdev->info->flags = FBINFO_FLAG_DEFAULT;
+ ret = adf_interface_get_screen_size(interface, &width_mm, &height_mm);
+ if (ret < 0) {
+ width_mm = 0;
+ height_mm = 0;
+ }
+ fbdev->info->var.width = width_mm;
+ fbdev->info->var.height = height_mm;
+ fbdev->info->var.activate = FB_ACTIVATE_VBL;
+ va_start(args, fmt);
+ vsnprintf(fbdev->info->fix.id, sizeof(fbdev->info->fix.id), fmt, args);
+ va_end(args);
+ fbdev->info->fix.type = FB_TYPE_PACKED_PIXELS;
+ fbdev->info->fix.visual = FB_VISUAL_TRUECOLOR;
+ fbdev->info->fix.xpanstep = 1;
+ fbdev->info->fix.ypanstep = 1;
+ INIT_LIST_HEAD(&fbdev->info->modelist);
+ fbdev->info->fbops = fbops;
+ fbdev->info->pseudo_palette = fbdev->pseudo_palette;
+ fbdev->info->par = fbdev;
+
+ ret = register_framebuffer(fbdev->info);
+ if (ret < 0) {
+ dev_err(dev, "registering framebuffer failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_init);
+
+/**
+ * adf_fbdev_destroy - destroy helper to wrap ADF device in fbdev API
+ *
+ * @fbdev: the fbdev helper
+ */
+void adf_fbdev_destroy(struct adf_fbdev *fbdev)
+{
+ unregister_framebuffer(fbdev->info);
+ if (WARN_ON(fbdev->open))
+ adf_fb_destroy(fbdev);
+ framebuffer_release(fbdev->info);
+}
+EXPORT_SYMBOL(adf_fbdev_destroy);
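+
+/*
+ * Example (sketch): wiring the default fbdev ops into a driver's fb_ops and
+ * registering the helper. The geometry, names, and use of the cfb_* drawing
+ * helpers are illustrative choices, not requirements.
+ *
+ *	static struct fb_ops example_fb_ops = {
+ *		.owner = THIS_MODULE,
+ *		.fb_open = adf_fbdev_open,
+ *		.fb_release = adf_fbdev_release,
+ *		.fb_check_var = adf_fbdev_check_var,
+ *		.fb_set_par = adf_fbdev_set_par,
+ *		.fb_blank = adf_fbdev_blank,
+ *		.fb_pan_display = adf_fbdev_pan_display,
+ *		.fb_fillrect = cfb_fillrect,
+ *		.fb_copyarea = cfb_copyarea,
+ *		.fb_imageblit = cfb_imageblit,
+ *		.fb_mmap = adf_fbdev_mmap,
+ *	};
+ *
+ *	err = adf_fbdev_init(&example_dev->fbdev, &example_dev->intf,
+ *			&example_dev->eng, 1920, 1080, DRM_FORMAT_XRGB8888,
+ *			&example_fb_ops, "example-fb");
+ */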
diff --git a/drivers/video/adf/adf_fops.c b/drivers/video/adf/adf_fops.c
new file mode 100644
index 00000000000..abec58ea2ed
--- /dev/null
+++ b/drivers/video/adf/adf_fops.c
@@ -0,0 +1,957 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/circ_buf.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <video/adf_client.h>
+#include <video/adf_format.h>
+
+#include "sw_sync.h"
+#include "sync.h"
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+#ifdef CONFIG_COMPAT
+#include "adf_fops32.h"
+#endif
+
+static int adf_obj_set_event(struct adf_obj *obj, struct adf_file *file,
+ struct adf_set_event __user *arg)
+{
+ struct adf_set_event data;
+ bool enabled;
+ unsigned long flags;
+ int err;
+
+ if (copy_from_user(&data, arg, sizeof(data)))
+ return -EFAULT;
+
+ err = adf_obj_check_supports_event(obj, data.type);
+ if (err < 0)
+ return err;
+
+ spin_lock_irqsave(&obj->file_lock, flags);
+ if (data.enabled)
+ enabled = test_and_set_bit(data.type,
+ file->event_subscriptions);
+ else
+ enabled = test_and_clear_bit(data.type,
+ file->event_subscriptions);
+ spin_unlock_irqrestore(&obj->file_lock, flags);
+
+ if (data.enabled == enabled)
+ return -EALREADY;
+
+ if (data.enabled)
+ adf_event_get(obj, data.type);
+ else
+ adf_event_put(obj, data.type);
+
+ return 0;
+}
+
+static int adf_obj_copy_custom_data_to_user(struct adf_obj *obj,
+ void __user *dst, size_t *dst_size)
+{
+ void *custom_data;
+ size_t custom_data_size;
+ int ret;
+
+ if (!obj->ops || !obj->ops->custom_data) {
+ dev_dbg(&obj->dev, "%s: no custom_data op\n", __func__);
+ return 0;
+ }
+
+ custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
+ if (!custom_data)
+ return -ENOMEM;
+
+ ret = obj->ops->custom_data(obj, custom_data, &custom_data_size);
+ if (ret < 0)
+ goto done;
+
+ if (copy_to_user(dst, custom_data, min(*dst_size, custom_data_size))) {
+ ret = -EFAULT;
+ goto done;
+ }
+ *dst_size = custom_data_size;
+
+done:
+ kfree(custom_data);
+ return ret;
+}
+
+static int adf_eng_get_data(struct adf_overlay_engine *eng,
+ struct adf_overlay_engine_data __user *arg)
+{
+ struct adf_device *dev = adf_overlay_engine_parent(eng);
+ struct adf_overlay_engine_data data;
+ size_t n_supported_formats;
+ u32 *supported_formats = NULL;
+ int ret = 0;
+
+ if (copy_from_user(&data, arg, sizeof(data)))
+ return -EFAULT;
+
+ strlcpy(data.name, eng->base.name, sizeof(data.name));
+
+ if (data.n_supported_formats > ADF_MAX_SUPPORTED_FORMATS)
+ return -EINVAL;
+
+ n_supported_formats = data.n_supported_formats;
+ data.n_supported_formats = eng->ops->n_supported_formats;
+
+ if (n_supported_formats) {
+ supported_formats = kzalloc(n_supported_formats *
+ sizeof(supported_formats[0]), GFP_KERNEL);
+ if (!supported_formats)
+ return -ENOMEM;
+ }
+
+ memcpy(supported_formats, eng->ops->supported_formats,
+ sizeof(u32) * min(n_supported_formats,
+ eng->ops->n_supported_formats));
+
+ mutex_lock(&dev->client_lock);
+ ret = adf_obj_copy_custom_data_to_user(&eng->base, arg->custom_data,
+ &data.custom_data_size);
+ mutex_unlock(&dev->client_lock);
+
+ if (ret < 0)
+ goto done;
+
+ if (copy_to_user(arg, &data, sizeof(data))) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (supported_formats && copy_to_user(arg->supported_formats,
+ supported_formats,
+ n_supported_formats * sizeof(supported_formats[0])))
+ ret = -EFAULT;
+
+done:
+ kfree(supported_formats);
+ return ret;
+}
+
+static int adf_buffer_import(struct adf_device *dev,
+ struct adf_buffer_config __user *cfg, struct adf_buffer *buf)
+{
+ struct adf_buffer_config user_buf;
+ size_t i;
+ int ret = 0;
+
+ if (copy_from_user(&user_buf, cfg, sizeof(user_buf)))
+ return -EFAULT;
+
+ memset(buf, 0, sizeof(*buf));
+
+ if (user_buf.n_planes > ADF_MAX_PLANES) {
+ dev_err(&dev->base.dev, "invalid plane count %u\n",
+ user_buf.n_planes);
+ return -EINVAL;
+ }
+
+ buf->overlay_engine = idr_find(&dev->overlay_engines,
+ user_buf.overlay_engine);
+ if (!buf->overlay_engine) {
+ dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
+ user_buf.overlay_engine);
+ return -ENOENT;
+ }
+
+ buf->w = user_buf.w;
+ buf->h = user_buf.h;
+ buf->format = user_buf.format;
+ for (i = 0; i < user_buf.n_planes; i++) {
+ buf->dma_bufs[i] = dma_buf_get(user_buf.fd[i]);
+ if (IS_ERR(buf->dma_bufs[i])) {
+ ret = PTR_ERR(buf->dma_bufs[i]);
+ dev_err(&dev->base.dev, "importing dma_buf fd %llu failed: %d\n",
+ user_buf.fd[i], ret);
+ buf->dma_bufs[i] = NULL;
+ goto done;
+ }
+ buf->offset[i] = user_buf.offset[i];
+ buf->pitch[i] = user_buf.pitch[i];
+ }
+ buf->n_planes = user_buf.n_planes;
+
+ if (user_buf.acquire_fence >= 0) {
+ buf->acquire_fence = sync_fence_fdget(user_buf.acquire_fence);
+ if (!buf->acquire_fence) {
+ dev_err(&dev->base.dev, "getting fence fd %lld failed\n",
+ user_buf.acquire_fence);
+ ret = -EINVAL;
+ goto done;
+ }
+ }
+
+done:
+ if (ret < 0)
+ adf_buffer_cleanup(buf);
+ return ret;
+}
+
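+/*
+ * Handle ADF_POST_CONFIG: bounds-check the user-supplied counts, look up the
+ * target interfaces, import the buffers, then hand the request to
+ * adf_device_post_nocopy(). The completion fence is installed in the
+ * reserved fd only after the post has been accepted, so no fd is leaked to
+ * userspace on failure.
+ */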
+static int adf_device_post_config(struct adf_device *dev,
+ struct adf_post_config __user *arg)
+{
+ struct sync_fence *complete_fence;
+ int complete_fence_fd;
+ struct adf_buffer *bufs = NULL;
+ struct adf_interface **intfs = NULL;
+ size_t n_intfs, n_bufs, i;
+ void *custom_data = NULL;
+ size_t custom_data_size;
+ int ret = 0;
+
+ complete_fence_fd = get_unused_fd();
+ if (complete_fence_fd < 0)
+ return complete_fence_fd;
+
+ if (get_user(n_intfs, &arg->n_interfaces)) {
+ ret = -EFAULT;
+ goto err_get_user;
+ }
+
+ if (n_intfs > ADF_MAX_INTERFACES) {
+ ret = -EINVAL;
+ goto err_get_user;
+ }
+
+ if (get_user(n_bufs, &arg->n_bufs)) {
+ ret = -EFAULT;
+ goto err_get_user;
+ }
+
+ if (n_bufs > ADF_MAX_BUFFERS) {
+ ret = -EINVAL;
+ goto err_get_user;
+ }
+
+ if (get_user(custom_data_size, &arg->custom_data_size)) {
+ ret = -EFAULT;
+ goto err_get_user;
+ }
+
+ if (custom_data_size > ADF_MAX_CUSTOM_DATA_SIZE) {
+ ret = -EINVAL;
+ goto err_get_user;
+ }
+
+ if (n_intfs) {
+ intfs = kmalloc(sizeof(intfs[0]) * n_intfs, GFP_KERNEL);
+ if (!intfs) {
+ ret = -ENOMEM;
+ goto err_get_user;
+ }
+ }
+
+ for (i = 0; i < n_intfs; i++) {
+ u32 intf_id;
+ if (get_user(intf_id, &arg->interfaces[i])) {
+ ret = -EFAULT;
+ goto err_get_user;
+ }
+
+ intfs[i] = idr_find(&dev->interfaces, intf_id);
+ if (!intfs[i]) {
+ ret = -EINVAL;
+ goto err_get_user;
+ }
+ }
+
+ if (n_bufs) {
+ bufs = kzalloc(sizeof(bufs[0]) * n_bufs, GFP_KERNEL);
+ if (!bufs) {
+ ret = -ENOMEM;
+ goto err_get_user;
+ }
+ }
+
+ for (i = 0; i < n_bufs; i++) {
+ ret = adf_buffer_import(dev, &arg->bufs[i], &bufs[i]);
+ if (ret < 0) {
+ memset(&bufs[i], 0, sizeof(bufs[i]));
+ goto err_import;
+ }
+ }
+
+ if (custom_data_size) {
+ custom_data = kzalloc(custom_data_size, GFP_KERNEL);
+ if (!custom_data) {
+ ret = -ENOMEM;
+ goto err_import;
+ }
+
+ if (copy_from_user(custom_data, arg->custom_data,
+ custom_data_size)) {
+ ret = -EFAULT;
+ goto err_import;
+ }
+ }
+
+ if (put_user(complete_fence_fd, &arg->complete_fence)) {
+ ret = -EFAULT;
+ goto err_import;
+ }
+
+ complete_fence = adf_device_post_nocopy(dev, intfs, n_intfs, bufs,
+ n_bufs, custom_data, custom_data_size);
+ if (IS_ERR(complete_fence)) {
+ ret = PTR_ERR(complete_fence);
+ goto err_import;
+ }
+
+ sync_fence_install(complete_fence, complete_fence_fd);
+ return 0;
+
+err_import:
+ for (i = 0; i < n_bufs; i++)
+ adf_buffer_cleanup(&bufs[i]);
+
+err_get_user:
+ kfree(custom_data);
+ kfree(bufs);
+ kfree(intfs);
+ put_unused_fd(complete_fence_fd);
+ return ret;
+}
+
+static int adf_intf_simple_post_config(struct adf_interface *intf,
+ struct adf_simple_post_config __user *arg)
+{
+ struct adf_device *dev = intf->base.parent;
+ struct sync_fence *complete_fence;
+ int complete_fence_fd;
+ struct adf_buffer buf;
+ int ret = 0;
+
+ complete_fence_fd = get_unused_fd();
+ if (complete_fence_fd < 0)
+ return complete_fence_fd;
+
+ ret = adf_buffer_import(dev, &arg->buf, &buf);
+ if (ret < 0)
+ goto err_import;
+
+ if (put_user(complete_fence_fd, &arg->complete_fence)) {
+ ret = -EFAULT;
+ goto err_put_user;
+ }
+
+ complete_fence = adf_interface_simple_post(intf, &buf);
+ if (IS_ERR(complete_fence)) {
+ ret = PTR_ERR(complete_fence);
+ goto err_put_user;
+ }
+
+ sync_fence_install(complete_fence, complete_fence_fd);
+ return 0;
+
+err_put_user:
+ adf_buffer_cleanup(&buf);
+err_import:
+ put_unused_fd(complete_fence_fd);
+ return ret;
+}
+
+static int adf_intf_simple_buffer_alloc(struct adf_interface *intf,
+ struct adf_simple_buffer_alloc __user *arg)
+{
+ struct adf_simple_buffer_alloc data;
+ struct dma_buf *dma_buf;
+ int ret = 0;
+
+ if (copy_from_user(&data, arg, sizeof(data)))
+ return -EFAULT;
+
+ data.fd = get_unused_fd_flags(O_CLOEXEC);
+ if (data.fd < 0)
+ return data.fd;
+
+ ret = adf_interface_simple_buffer_alloc(intf, data.w, data.h,
+ data.format, &dma_buf, &data.offset, &data.pitch);
+ if (ret < 0)
+ goto err_alloc;
+
+ if (copy_to_user(arg, &data, sizeof(*arg))) {
+ ret = -EFAULT;
+ goto err_copy;
+ }
+
+ fd_install(data.fd, dma_buf->file);
+ return 0;
+
+err_copy:
+ dma_buf_put(dma_buf);
+
+err_alloc:
+ put_unused_fd(data.fd);
+ return ret;
+}
+
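+/*
+ * Flatten kernel adf_attachment structs into (interface id, overlay engine
+ * id) pairs and copy up to min(n_to, n_from) of them to userspace.
+ */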
+static int adf_copy_attachment_list_to_user(
+ struct adf_attachment_config __user *to, size_t n_to,
+ struct adf_attachment *from, size_t n_from)
+{
+ struct adf_attachment_config *temp;
+ size_t n = min(n_to, n_from);
+ size_t i;
+ int ret = 0;
+
+ if (!n)
+ return 0;
+
+ temp = kzalloc(n * sizeof(temp[0]), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++) {
+ temp[i].interface = from[i].interface->base.id;
+ temp[i].overlay_engine = from[i].overlay_engine->base.id;
+ }
+
+ if (copy_to_user(to, temp, n * sizeof(to[0])))
+ ret = -EFAULT;
+
+ kfree(temp);
+ return ret;
+}
+
+static int adf_device_get_data(struct adf_device *dev,
+ struct adf_device_data __user *arg)
+{
+ struct adf_device_data data;
+ size_t n_attach;
+ struct adf_attachment *attach = NULL;
+ size_t n_allowed_attach;
+ struct adf_attachment *allowed_attach = NULL;
+ int ret = 0;
+
+ if (copy_from_user(&data, arg, sizeof(data)))
+ return -EFAULT;
+
+ if (data.n_attachments > ADF_MAX_ATTACHMENTS ||
+ data.n_allowed_attachments > ADF_MAX_ATTACHMENTS)
+ return -EINVAL;
+
+ strlcpy(data.name, dev->base.name, sizeof(data.name));
+
+ if (data.n_attachments) {
+ attach = kzalloc(data.n_attachments * sizeof(attach[0]),
+ GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+ }
+ n_attach = adf_device_attachments(dev, attach, data.n_attachments);
+
+ if (data.n_allowed_attachments) {
+ allowed_attach = kzalloc(data.n_allowed_attachments *
+ sizeof(allowed_attach[0]), GFP_KERNEL);
+ if (!allowed_attach) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ }
+ n_allowed_attach = adf_device_attachments_allowed(dev, allowed_attach,
+ data.n_allowed_attachments);
+
+ mutex_lock(&dev->client_lock);
+ ret = adf_obj_copy_custom_data_to_user(&dev->base, data.custom_data,
+ &data.custom_data_size);
+ mutex_unlock(&dev->client_lock);
+
+ if (ret < 0)
+ goto done;
+
+ ret = adf_copy_attachment_list_to_user(data.attachments,
+ data.n_attachments, attach, n_attach);
+ if (ret < 0)
+ goto done;
+
+ ret = adf_copy_attachment_list_to_user(data.allowed_attachments,
+ data.n_allowed_attachments, allowed_attach,
+ n_allowed_attach);
+ if (ret < 0)
+ goto done;
+
+ data.n_attachments = n_attach;
+ data.n_allowed_attachments = n_allowed_attach;
+
+ if (copy_to_user(arg, &data, sizeof(data)))
+ ret = -EFAULT;
+
+done:
+ kfree(allowed_attach);
+ kfree(attach);
+ return ret;
+}
+
+static int adf_device_handle_attachment(struct adf_device *dev,
+ struct adf_attachment_config __user *arg, bool attach)
+{
+ struct adf_attachment_config data;
+ struct adf_overlay_engine *eng;
+ struct adf_interface *intf;
+
+ if (copy_from_user(&data, arg, sizeof(data)))
+ return -EFAULT;
+
+ eng = idr_find(&dev->overlay_engines, data.overlay_engine);
+ if (!eng) {
+ dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
+ data.overlay_engine);
+ return -EINVAL;
+ }
+
+ intf = idr_find(&dev->interfaces, data.interface);
+ if (!intf) {
+ dev_err(&dev->base.dev, "invalid interface id %u\n",
+ data.interface);
+ return -EINVAL;
+ }
+
+ if (attach)
+ return adf_device_attach(dev, eng, intf);
+ else
+ return adf_device_detach(dev, eng, intf);
+}
+
+static int adf_intf_set_mode(struct adf_interface *intf,
+ struct drm_mode_modeinfo __user *arg)
+{
+ struct drm_mode_modeinfo mode;
+
+ if (copy_from_user(&mode, arg, sizeof(mode)))
+ return -EFAULT;
+
+ return adf_interface_set_mode(intf, &mode);
+}
+
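+/*
+ * Snapshot interface state for ADF_GET_INTERFACE_DATA. The modelist is
+ * copied under hotplug_modelist_lock into a preallocated kernel buffer and
+ * only pushed to userspace afterwards, since copy_to_user() may fault.
+ */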
+static int adf_intf_get_data(struct adf_interface *intf,
+ struct adf_interface_data __user *arg)
+{
+ struct adf_device *dev = adf_interface_parent(intf);
+ struct adf_interface_data data;
+ struct drm_mode_modeinfo *modelist;
+ size_t modelist_size;
+ int err;
+ int ret = 0;
+ unsigned long flags;
+
+ if (copy_from_user(&data, arg, sizeof(data)))
+ return -EFAULT;
+
+ strlcpy(data.name, intf->base.name, sizeof(data.name));
+
+ data.type = intf->type;
+ data.id = intf->idx;
+ data.flags = intf->flags;
+
+ err = adf_interface_get_screen_size(intf, &data.width_mm,
+ &data.height_mm);
+ if (err < 0) {
+ data.width_mm = 0;
+ data.height_mm = 0;
+ }
+
+ modelist = kmalloc(sizeof(modelist[0]) * ADF_MAX_MODES, GFP_KERNEL);
+ if (!modelist)
+ return -ENOMEM;
+
+ mutex_lock(&dev->client_lock);
+ read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+ data.hotplug_detect = intf->hotplug_detect;
+ modelist_size = min(data.n_available_modes, intf->n_modes) *
+ sizeof(intf->modelist[0]);
+ memcpy(modelist, intf->modelist, modelist_size);
+ data.n_available_modes = intf->n_modes;
+ read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+ if (copy_to_user(data.available_modes, modelist, modelist_size)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ data.dpms_state = intf->dpms_state;
+ memcpy(&data.current_mode, &intf->current_mode,
+ sizeof(intf->current_mode));
+
+ ret = adf_obj_copy_custom_data_to_user(&intf->base, data.custom_data,
+ &data.custom_data_size);
+done:
+ mutex_unlock(&dev->client_lock);
+ kfree(modelist);
+
+ if (ret < 0)
+ return ret;
+
+ if (copy_to_user(arg, &data, sizeof(data)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static inline long adf_obj_custom_ioctl(struct adf_obj *obj, unsigned int cmd,
+ unsigned long arg)
+{
+ if (obj->ops && obj->ops->ioctl)
+ return obj->ops->ioctl(obj, cmd, arg);
+ return -ENOTTY;
+}
+
+static long adf_overlay_engine_ioctl(struct adf_overlay_engine *eng,
+ struct adf_file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case ADF_SET_EVENT:
+ return adf_obj_set_event(&eng->base, file,
+ (struct adf_set_event __user *)arg);
+
+ case ADF_GET_OVERLAY_ENGINE_DATA:
+ return adf_eng_get_data(eng,
+ (struct adf_overlay_engine_data __user *)arg);
+
+ case ADF_BLANK:
+ case ADF_POST_CONFIG:
+ case ADF_SET_MODE:
+ case ADF_GET_DEVICE_DATA:
+ case ADF_GET_INTERFACE_DATA:
+ case ADF_SIMPLE_POST_CONFIG:
+ case ADF_SIMPLE_BUFFER_ALLOC:
+ case ADF_ATTACH:
+ case ADF_DETACH:
+ return -EINVAL;
+
+ default:
+ return adf_obj_custom_ioctl(&eng->base, cmd, arg);
+ }
+}
+
+static long adf_interface_ioctl(struct adf_interface *intf,
+ struct adf_file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case ADF_SET_EVENT:
+ return adf_obj_set_event(&intf->base, file,
+ (struct adf_set_event __user *)arg);
+
+ case ADF_BLANK:
+ return adf_interface_blank(intf, arg);
+
+ case ADF_SET_MODE:
+ return adf_intf_set_mode(intf,
+ (struct drm_mode_modeinfo __user *)arg);
+
+ case ADF_GET_INTERFACE_DATA:
+ return adf_intf_get_data(intf,
+ (struct adf_interface_data __user *)arg);
+
+ case ADF_SIMPLE_POST_CONFIG:
+ return adf_intf_simple_post_config(intf,
+ (struct adf_simple_post_config __user *)arg);
+
+ case ADF_SIMPLE_BUFFER_ALLOC:
+ return adf_intf_simple_buffer_alloc(intf,
+ (struct adf_simple_buffer_alloc __user *)arg);
+
+ case ADF_POST_CONFIG:
+ case ADF_GET_DEVICE_DATA:
+ case ADF_GET_OVERLAY_ENGINE_DATA:
+ case ADF_ATTACH:
+ case ADF_DETACH:
+ return -EINVAL;
+
+ default:
+ return adf_obj_custom_ioctl(&intf->base, cmd, arg);
+ }
+}
+
+static long adf_device_ioctl(struct adf_device *dev, struct adf_file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case ADF_SET_EVENT:
+ return adf_obj_set_event(&dev->base, file,
+ (struct adf_set_event __user *)arg);
+
+ case ADF_POST_CONFIG:
+ return adf_device_post_config(dev,
+ (struct adf_post_config __user *)arg);
+
+ case ADF_GET_DEVICE_DATA:
+ return adf_device_get_data(dev,
+ (struct adf_device_data __user *)arg);
+
+ case ADF_ATTACH:
+ return adf_device_handle_attachment(dev,
+ (struct adf_attachment_config __user *)arg,
+ true);
+
+ case ADF_DETACH:
+ return adf_device_handle_attachment(dev,
+ (struct adf_attachment_config __user *)arg,
+ false);
+
+ case ADF_BLANK:
+ case ADF_SET_MODE:
+ case ADF_GET_INTERFACE_DATA:
+ case ADF_GET_OVERLAY_ENGINE_DATA:
+ case ADF_SIMPLE_POST_CONFIG:
+ case ADF_SIMPLE_BUFFER_ALLOC:
+ return -EINVAL;
+
+ default:
+ return adf_obj_custom_ioctl(&dev->base, cmd, arg);
+ }
+}
+
+static int adf_file_open(struct inode *inode, struct file *file)
+{
+ struct adf_obj *obj;
+ struct adf_file *fpriv = NULL;
+ unsigned long flags;
+ int ret = 0;
+
+ obj = adf_obj_sysfs_find(iminor(inode));
+ if (!obj)
+ return -ENODEV;
+
+ dev_dbg(&obj->dev, "opening %s\n", dev_name(&obj->dev));
+
+ if (!try_module_get(obj->parent->ops->owner)) {
+ dev_err(&obj->dev, "getting owner module failed\n");
+ return -ENODEV;
+ }
+
+ fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+ if (!fpriv) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ INIT_LIST_HEAD(&fpriv->head);
+ fpriv->obj = obj;
+ init_waitqueue_head(&fpriv->event_wait);
+
+ file->private_data = fpriv;
+
+ if (obj->ops && obj->ops->open) {
+ ret = obj->ops->open(obj, inode, file);
+ if (ret < 0)
+ goto done;
+ }
+
+ spin_lock_irqsave(&obj->file_lock, flags);
+ list_add_tail(&fpriv->head, &obj->file_list);
+ spin_unlock_irqrestore(&obj->file_lock, flags);
+
+done:
+ if (ret < 0) {
+ kfree(fpriv);
+ module_put(obj->parent->ops->owner);
+ }
+ return ret;
+}
+
+static int adf_file_release(struct inode *inode, struct file *file)
+{
+ struct adf_file *fpriv = file->private_data;
+ struct adf_obj *obj = fpriv->obj;
+ enum adf_event_type event_type;
+ unsigned long flags;
+
+ if (obj->ops && obj->ops->release)
+ obj->ops->release(obj, inode, file);
+
+ spin_lock_irqsave(&obj->file_lock, flags);
+ list_del(&fpriv->head);
+ spin_unlock_irqrestore(&obj->file_lock, flags);
+
+ for_each_set_bit(event_type, fpriv->event_subscriptions,
+ ADF_EVENT_TYPE_MAX) {
+ adf_event_put(obj, event_type);
+ }
+
+ kfree(fpriv);
+ module_put(obj->parent->ops->owner);
+
+ dev_dbg(&obj->dev, "released %s\n", dev_name(&obj->dev));
+ return 0;
+}
+
+long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct adf_file *fpriv = file->private_data;
+ struct adf_obj *obj = fpriv->obj;
+ long ret = -EINVAL;
+
+ dev_dbg(&obj->dev, "%s ioctl %u\n", dev_name(&obj->dev), _IOC_NR(cmd));
+
+ switch (obj->type) {
+ case ADF_OBJ_OVERLAY_ENGINE:
+ ret = adf_overlay_engine_ioctl(adf_obj_to_overlay_engine(obj),
+ fpriv, cmd, arg);
+ break;
+
+ case ADF_OBJ_INTERFACE:
+ ret = adf_interface_ioctl(adf_obj_to_interface(obj), fpriv, cmd,
+ arg);
+ break;
+
+ case ADF_OBJ_DEVICE:
+ ret = adf_device_ioctl(adf_obj_to_device(obj), fpriv, cmd, arg);
+ break;
+ }
+
+ return ret;
+}
+
+static inline bool adf_file_event_available(struct adf_file *fpriv)
+{
+ int head = fpriv->event_head;
+ int tail = fpriv->event_tail;
+ return CIRC_CNT(head, tail, sizeof(fpriv->event_buf)) != 0;
+}
+
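+/*
+ * Append an event to the file's circular buffer; the buffer size is a power
+ * of two so head/tail can wrap with a simple mask. Events that do not fit
+ * are dropped with only a debug message.
+ */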
+void adf_file_queue_event(struct adf_file *fpriv, struct adf_event *event)
+{
+ int head = fpriv->event_head;
+ int tail = fpriv->event_tail;
+ size_t space = CIRC_SPACE(head, tail, sizeof(fpriv->event_buf));
+ size_t space_to_end =
+ CIRC_SPACE_TO_END(head, tail, sizeof(fpriv->event_buf));
+
+ if (space < event->length) {
+ dev_dbg(&fpriv->obj->dev,
+ "insufficient buffer space for event %u\n",
+ event->type);
+ return;
+ }
+
+ if (space_to_end >= event->length) {
+ memcpy(fpriv->event_buf + head, event, event->length);
+ } else {
+ memcpy(fpriv->event_buf + head, event, space_to_end);
+ memcpy(fpriv->event_buf, (u8 *)event + space_to_end,
+ event->length - space_to_end);
+ }
+
+ smp_wmb();
+ fpriv->event_head = (fpriv->event_head + event->length) &
+ (sizeof(fpriv->event_buf) - 1);
+ wake_up_interruptible_all(&fpriv->event_wait);
+}
+
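+/*
+ * Drain queued events into a temporary kernel buffer while holding the file
+ * lock, then copy to userspace after dropping it: copy_to_user() may fault
+ * and sleep, which is not allowed under a spinlock.
+ */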
+static ssize_t adf_file_copy_to_user(struct adf_file *fpriv,
+ char __user *buffer, size_t buffer_size)
+{
+ int head, tail;
+ u8 *event_buf;
+ size_t cnt, cnt_to_end, copy_size = 0;
+ ssize_t ret = 0;
+ unsigned long flags;
+
+ event_buf = kmalloc(min(buffer_size, sizeof(fpriv->event_buf)),
+ GFP_KERNEL);
+ if (!event_buf)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&fpriv->obj->file_lock, flags);
+
+ if (!adf_file_event_available(fpriv))
+ goto out;
+
+ head = fpriv->event_head;
+ tail = fpriv->event_tail;
+
+ cnt = CIRC_CNT(head, tail, sizeof(fpriv->event_buf));
+ cnt_to_end = CIRC_CNT_TO_END(head, tail, sizeof(fpriv->event_buf));
+ copy_size = min(buffer_size, cnt);
+
+ if (cnt_to_end >= copy_size) {
+ memcpy(event_buf, fpriv->event_buf + tail, copy_size);
+ } else {
+ memcpy(event_buf, fpriv->event_buf + tail, cnt_to_end);
+ memcpy(event_buf + cnt_to_end, fpriv->event_buf,
+ copy_size - cnt_to_end);
+ }
+
+ fpriv->event_tail = (fpriv->event_tail + copy_size) &
+ (sizeof(fpriv->event_buf) - 1);
+
+out:
+ spin_unlock_irqrestore(&fpriv->obj->file_lock, flags);
+ if (copy_size) {
+ if (copy_to_user(buffer, event_buf, copy_size))
+ ret = -EFAULT;
+ else
+ ret = copy_size;
+ }
+ kfree(event_buf);
+ return ret;
+}
+
+ssize_t adf_file_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset)
+{
+ struct adf_file *fpriv = filp->private_data;
+ int err;
+
+ err = wait_event_interruptible(fpriv->event_wait,
+ adf_file_event_available(fpriv));
+ if (err < 0)
+ return err;
+
+ return adf_file_copy_to_user(fpriv, buffer, count);
+}
+
+unsigned int adf_file_poll(struct file *filp, struct poll_table_struct *wait)
+{
+ struct adf_file *fpriv = filp->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &fpriv->event_wait, wait);
+
+ if (adf_file_event_available(fpriv))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
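+/*
+ * Userspace waits for events by poll()ing the device node for POLLIN and
+ * then read()ing a stream of struct adf_event records (each event->length
+ * bytes); a read drains at most count bytes and may split a record across
+ * calls if the buffer is too small.
+ */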
+const struct file_operations adf_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = adf_file_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = adf_file_compat_ioctl,
+#endif
+ .open = adf_file_open,
+ .release = adf_file_release,
+ .llseek = default_llseek,
+ .read = adf_file_read,
+ .poll = adf_file_poll,
+};
diff --git a/drivers/video/adf/adf_fops.h b/drivers/video/adf/adf_fops.h
new file mode 100644
index 00000000000..90a3a74796d
--- /dev/null
+++ b/drivers/video/adf/adf_fops.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_FOPS_H
+#define __VIDEO_ADF_ADF_FOPS_H
+
+#include <linux/bitmap.h>
+#include <linux/fs.h>
+
+extern const struct file_operations adf_fops;
+
+struct adf_file {
+ struct list_head head;
+ struct adf_obj *obj;
+
+ DECLARE_BITMAP(event_subscriptions, ADF_EVENT_TYPE_MAX);
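+ /* must stay a power of two: adf_fops.c wraps head/tail by masking */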
+ u8 event_buf[4096];
+ int event_head;
+ int event_tail;
+ wait_queue_head_t event_wait;
+};
+
+void adf_file_queue_event(struct adf_file *file, struct adf_event *event);
+long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+#endif /* __VIDEO_ADF_ADF_FOPS_H */
diff --git a/drivers/video/adf/adf_fops32.c b/drivers/video/adf/adf_fops32.c
new file mode 100644
index 00000000000..60a47cf5a78
--- /dev/null
+++ b/drivers/video/adf/adf_fops32.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/uaccess.h>
+#include <video/adf.h>
+
+#include "adf_fops.h"
+#include "adf_fops32.h"
+
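+/*
+ * Each compat thunk below follows the same pattern: expand the 32-bit struct
+ * (whose pointers are compat_uptr_t) into its native counterpart on the
+ * userspace stack via compat_alloc_user_space(), call the regular ioctl
+ * handler, then copy the output fields back into the caller's 32-bit struct.
+ */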
+long adf_compat_post_config(struct file *file,
+ struct adf_post_config32 __user *arg)
+{
+ struct adf_post_config32 cfg32;
+ struct adf_post_config __user *cfg;
+ int ret;
+
+ if (copy_from_user(&cfg32, arg, sizeof(cfg32)))
+ return -EFAULT;
+
+ cfg = compat_alloc_user_space(sizeof(*cfg));
+ if (!access_ok(VERIFY_WRITE, cfg, sizeof(*cfg)))
+ return -EFAULT;
+
+ if (put_user(cfg32.n_interfaces, &cfg->n_interfaces) ||
+ put_user(compat_ptr(cfg32.interfaces),
+ &cfg->interfaces) ||
+ put_user(cfg32.n_bufs, &cfg->n_bufs) ||
+ put_user(compat_ptr(cfg32.bufs), &cfg->bufs) ||
+ put_user(cfg32.custom_data_size,
+ &cfg->custom_data_size) ||
+ put_user(compat_ptr(cfg32.custom_data),
+ &cfg->custom_data))
+ return -EFAULT;
+
+ ret = adf_file_ioctl(file, ADF_POST_CONFIG, (unsigned long)cfg);
+ if (ret < 0)
+ return ret;
+
+ if (copy_in_user(&arg->complete_fence, &cfg->complete_fence,
+ sizeof(cfg->complete_fence)))
+ return -EFAULT;
+
+ return 0;
+}
+
+long adf_compat_get_device_data(struct file *file,
+ struct adf_device_data32 __user *arg)
+{
+ struct adf_device_data32 data32;
+ struct adf_device_data __user *data;
+ int ret;
+
+ if (copy_from_user(&data32, arg, sizeof(data32)))
+ return -EFAULT;
+
+ data = compat_alloc_user_space(sizeof(*data));
+ if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+ return -EFAULT;
+
+ if (put_user(data32.n_attachments, &data->n_attachments) ||
+ put_user(compat_ptr(data32.attachments),
+ &data->attachments) ||
+ put_user(data32.n_allowed_attachments,
+ &data->n_allowed_attachments) ||
+ put_user(compat_ptr(data32.allowed_attachments),
+ &data->allowed_attachments) ||
+ put_user(data32.custom_data_size,
+ &data->custom_data_size) ||
+ put_user(compat_ptr(data32.custom_data),
+ &data->custom_data))
+ return -EFAULT;
+
+ ret = adf_file_ioctl(file, ADF_GET_DEVICE_DATA, (unsigned long)data);
+ if (ret < 0)
+ return ret;
+
+ if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+ copy_in_user(&arg->n_attachments, &data->n_attachments,
+ sizeof(arg->n_attachments)) ||
+ copy_in_user(&arg->n_allowed_attachments,
+ &data->n_allowed_attachments,
+ sizeof(arg->n_allowed_attachments)) ||
+ copy_in_user(&arg->custom_data_size,
+ &data->custom_data_size,
+ sizeof(arg->custom_data_size)))
+ return -EFAULT;
+
+ return 0;
+}
+
+long adf_compat_get_interface_data(struct file *file,
+ struct adf_interface_data32 __user *arg)
+{
+ struct adf_interface_data32 data32;
+ struct adf_interface_data __user *data;
+ int ret;
+
+ if (copy_from_user(&data32, arg, sizeof(data32)))
+ return -EFAULT;
+
+ data = compat_alloc_user_space(sizeof(*data));
+ if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+ return -EFAULT;
+
+ if (put_user(data32.n_available_modes, &data->n_available_modes) ||
+ put_user(compat_ptr(data32.available_modes),
+ &data->available_modes) ||
+ put_user(data32.custom_data_size,
+ &data->custom_data_size) ||
+ put_user(compat_ptr(data32.custom_data),
+ &data->custom_data))
+ return -EFAULT;
+
+ ret = adf_file_ioctl(file, ADF_GET_INTERFACE_DATA, (unsigned long)data);
+ if (ret < 0)
+ return ret;
+
+ if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+ copy_in_user(&arg->type, &data->type,
+ sizeof(arg->type)) ||
+ copy_in_user(&arg->id, &data->id, sizeof(arg->id)) ||
+ copy_in_user(&arg->flags, &data->flags,
+ sizeof(arg->flags)) ||
+ copy_in_user(&arg->dpms_state, &data->dpms_state,
+ sizeof(arg->dpms_state)) ||
+ copy_in_user(&arg->hotplug_detect,
+ &data->hotplug_detect,
+ sizeof(arg->hotplug_detect)) ||
+ copy_in_user(&arg->width_mm, &data->width_mm,
+ sizeof(arg->width_mm)) ||
+ copy_in_user(&arg->height_mm, &data->height_mm,
+ sizeof(arg->height_mm)) ||
+ copy_in_user(&arg->current_mode, &data->current_mode,
+ sizeof(arg->current_mode)) ||
+ copy_in_user(&arg->n_available_modes,
+ &data->n_available_modes,
+ sizeof(arg->n_available_modes)) ||
+ copy_in_user(&arg->custom_data_size,
+ &data->custom_data_size,
+ sizeof(arg->custom_data_size)))
+ return -EFAULT;
+
+ return 0;
+}
+
+long adf_compat_get_overlay_engine_data(struct file *file,
+ struct adf_overlay_engine_data32 __user *arg)
+{
+ struct adf_overlay_engine_data32 data32;
+ struct adf_overlay_engine_data __user *data;
+ int ret;
+
+ if (copy_from_user(&data32, arg, sizeof(data32)))
+ return -EFAULT;
+
+ data = compat_alloc_user_space(sizeof(*data));
+ if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+ return -EFAULT;
+
+ if (put_user(data32.n_supported_formats, &data->n_supported_formats) ||
+ put_user(compat_ptr(data32.supported_formats),
+ &data->supported_formats) ||
+ put_user(data32.custom_data_size,
+ &data->custom_data_size) ||
+ put_user(compat_ptr(data32.custom_data),
+ &data->custom_data))
+ return -EFAULT;
+
+ ret = adf_file_ioctl(file, ADF_GET_OVERLAY_ENGINE_DATA,
+ (unsigned long)data);
+ if (ret < 0)
+ return ret;
+
+ if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+ copy_in_user(&arg->n_supported_formats,
+ &data->n_supported_formats,
+ sizeof(arg->n_supported_formats)) ||
+ copy_in_user(&arg->custom_data_size,
+ &data->custom_data_size,
+ sizeof(arg->custom_data_size)))
+ return -EFAULT;
+
+ return 0;
+}
+
+long adf_file_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case ADF_POST_CONFIG32:
+ return adf_compat_post_config(file, compat_ptr(arg));
+
+ case ADF_GET_DEVICE_DATA32:
+ return adf_compat_get_device_data(file, compat_ptr(arg));
+
+ case ADF_GET_INTERFACE_DATA32:
+ return adf_compat_get_interface_data(file, compat_ptr(arg));
+
+ case ADF_GET_OVERLAY_ENGINE_DATA32:
+ return adf_compat_get_overlay_engine_data(file,
+ compat_ptr(arg));
+
+ default:
+ return adf_file_ioctl(file, cmd, arg);
+ }
+}
diff --git a/drivers/video/adf/adf_fops32.h b/drivers/video/adf/adf_fops32.h
new file mode 100644
index 00000000000..18c673dc5e2
--- /dev/null
+++ b/drivers/video/adf/adf_fops32.h
@@ -0,0 +1,78 @@
+#ifndef __VIDEO_ADF_ADF_FOPS32_H
+#define __VIDEO_ADF_ADF_FOPS32_H
+
+#include <linux/compat.h>
+#include <linux/ioctl.h>
+
+#include <video/adf.h>
+
+#define ADF_POST_CONFIG32 \
+ _IOW('D', 2, struct adf_post_config32)
+#define ADF_GET_DEVICE_DATA32 \
+ _IOR('D', 4, struct adf_device_data32)
+#define ADF_GET_INTERFACE_DATA32 \
+ _IOR('D', 5, struct adf_interface_data32)
+#define ADF_GET_OVERLAY_ENGINE_DATA32 \
+ _IOR('D', 6, struct adf_overlay_engine_data32)
+
+struct adf_post_config32 {
+ compat_size_t n_interfaces;
+ compat_uptr_t interfaces;
+
+ compat_size_t n_bufs;
+ compat_uptr_t bufs;
+
+ compat_size_t custom_data_size;
+ compat_uptr_t custom_data;
+
+ __s64 complete_fence;
+};
+
+struct adf_device_data32 {
+ char name[ADF_NAME_LEN];
+
+ compat_size_t n_attachments;
+ compat_uptr_t attachments;
+
+ compat_size_t n_allowed_attachments;
+ compat_uptr_t allowed_attachments;
+
+ compat_size_t custom_data_size;
+ compat_uptr_t custom_data;
+};
+
+struct adf_interface_data32 {
+ char name[ADF_NAME_LEN];
+
+ __u8 type;
+ __u32 id;
+ /* e.g. type=ADF_INTF_TYPE_DSI, id=1 => DSI.1 */
+ __u32 flags;
+
+ __u8 dpms_state;
+ __u8 hotplug_detect;
+ __u16 width_mm;
+ __u16 height_mm;
+
+ struct drm_mode_modeinfo current_mode;
+ compat_size_t n_available_modes;
+ compat_uptr_t available_modes;
+
+ compat_size_t custom_data_size;
+ compat_uptr_t custom_data;
+};
+
+struct adf_overlay_engine_data32 {
+ char name[ADF_NAME_LEN];
+
+ compat_size_t n_supported_formats;
+ compat_uptr_t supported_formats;
+
+ compat_size_t custom_data_size;
+ compat_uptr_t custom_data;
+};
+
+long adf_file_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+
+#endif /* __VIDEO_ADF_ADF_FOPS32_H */
diff --git a/drivers/video/adf/adf_format.c b/drivers/video/adf/adf_format.c
new file mode 100644
index 00000000000..e3f22c7c85d
--- /dev/null
+++ b/drivers/video/adf/adf_format.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ * modified from drivers/gpu/drm/drm_crtc.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <drm/drm_fourcc.h>
+#include <video/adf_format.h>
+
+bool adf_format_is_standard(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_XBGR4444:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_BGRX4444:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ABGR4444:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_AYUV:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(adf_format_is_standard);
+
+bool adf_format_is_rgb(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ return true;
+
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(adf_format_is_rgb);
+
+u8 adf_format_num_planes(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 3;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(adf_format_num_planes);
+
+u8 adf_format_bpp(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ return 8;
+
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ return 16;
+
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ return 24;
+
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ return 32;
+
+ default:
+ pr_debug("%s: unsupported pixel format %u\n", __func__, format);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(adf_format_bpp);
+
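+/* Bytes per pixel ("cpp") of the given plane, or 0 for an invalid plane. */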
+u8 adf_format_plane_cpp(u32 format, int plane)
+{
+ if (plane >= adf_format_num_planes(format))
+ return 0;
+
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ return 2;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return plane ? 2 : 1;
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 1;
+ default:
+ return adf_format_bpp(format) / 8;
+ }
+}
+EXPORT_SYMBOL(adf_format_plane_cpp);
+
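+/* Horizontal chroma subsampling factor: luma samples per chroma sample. */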
+u8 adf_format_horz_chroma_subsampling(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(adf_format_horz_chroma_subsampling);
+
+u8 adf_format_vert_chroma_subsampling(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(adf_format_vert_chroma_subsampling);
diff --git a/drivers/video/adf/adf_memblock.c b/drivers/video/adf/adf_memblock.c
new file mode 100644
index 00000000000..3c99f27388d
--- /dev/null
+++ b/drivers/video/adf/adf_memblock.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+struct adf_memblock_pdata {
+ phys_addr_t base;
+};
+
+static struct sg_table *adf_memblock_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct adf_memblock_pdata *pdata = attach->dmabuf->priv;
+ unsigned long pfn = PFN_DOWN(pdata->base);
+ struct page *page = pfn_to_page(pfn);
+ struct sg_table *table;
+ int ret;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret < 0)
+ goto err;
+
+ sg_set_page(table->sgl, page, attach->dmabuf->size, 0);
+ return table;
+
+err:
+ kfree(table);
+ return ERR_PTR(ret);
+}
+
+static void adf_memblock_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *table, enum dma_data_direction direction)
+{
+ sg_free_table(table);
+}
+
+static void __init_memblock adf_memblock_release(struct dma_buf *buf)
+{
+ struct adf_memblock_pdata *pdata = buf->priv;
+ int err = memblock_free(pdata->base, buf->size);
+
+ if (err < 0)
+ pr_warn("%s: freeing memblock failed: %d\n", __func__, err);
+ kfree(pdata);
+}
+
+static void *adf_memblock_do_kmap(struct dma_buf *buf, unsigned long pgoffset,
+ bool atomic)
+{
+ struct adf_memblock_pdata *pdata = buf->priv;
+ unsigned long pfn = PFN_DOWN(pdata->base) + pgoffset;
+ struct page *page = pfn_to_page(pfn);
+
+ if (atomic)
+ return kmap_atomic(page);
+ else
+ return kmap(page);
+}
+
+static void *adf_memblock_kmap_atomic(struct dma_buf *buf,
+ unsigned long pgoffset)
+{
+ return adf_memblock_do_kmap(buf, pgoffset, true);
+}
+
+static void adf_memblock_kunmap_atomic(struct dma_buf *buf,
+ unsigned long pgoffset, void *vaddr)
+{
+ kunmap_atomic(vaddr);
+}
+
+static void *adf_memblock_kmap(struct dma_buf *buf, unsigned long pgoffset)
+{
+ return adf_memblock_do_kmap(buf, pgoffset, false);
+}
+
+static void adf_memblock_kunmap(struct dma_buf *buf, unsigned long pgoffset,
+ void *vaddr)
+{
+ struct adf_memblock_pdata *pdata = buf->priv;
+
+ /* kunmap() takes the struct page, not the kernel virtual address */
+ kunmap(pfn_to_page(PFN_DOWN(pdata->base) + pgoffset));
+}
+
+static int adf_memblock_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+ struct adf_memblock_pdata *pdata = buf->priv;
+
+ return remap_pfn_range(vma, vma->vm_start, PFN_DOWN(pdata->base),
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+static const struct dma_buf_ops adf_memblock_ops = {
+ .map_dma_buf = adf_memblock_map,
+ .unmap_dma_buf = adf_memblock_unmap,
+ .release = adf_memblock_release,
+ .kmap_atomic = adf_memblock_kmap_atomic,
+ .kunmap_atomic = adf_memblock_kunmap_atomic,
+ .kmap = adf_memblock_kmap,
+ .kunmap = adf_memblock_kunmap,
+ .mmap = adf_memblock_mmap,
+};
+
+/**
+ * adf_memblock_export - export a memblock reserved area as a dma-buf
+ *
+ * @base: base physical address
+ * @size: memblock size
+ * @flags: mode flags for the dma-buf's file
+ *
+ * @base and @size must be page-aligned.
+ *
+ * Returns a dma-buf on success or ERR_PTR(-errno) on failure.
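+ *
+ * A minimal usage sketch (fb_base and fb_size are hypothetical, describing a
+ * page-aligned carveout reserved earlier with memblock_reserve()):
+ *
+ *	struct dma_buf *buf = adf_memblock_export(fb_base, fb_size, O_RDWR);
+ *	if (IS_ERR(buf))
+ *		return PTR_ERR(buf);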
+ */
+struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags)
+{
+ struct adf_memblock_pdata *pdata;
+ struct dma_buf *buf;
+
+ if (PAGE_ALIGN(base) != base || PAGE_ALIGN(size) != size)
+ return ERR_PTR(-EINVAL);
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->base = base;
+ buf = dma_buf_export(pdata, &adf_memblock_ops, size, flags);
+ if (IS_ERR(buf))
+ kfree(pdata);
+
+ return buf;
+}
diff --git a/drivers/video/adf/adf_sysfs.c b/drivers/video/adf/adf_sysfs.c
new file mode 100644
index 00000000000..8c659c71ffa
--- /dev/null
+++ b/drivers/video/adf/adf_sysfs.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <video/adf_client.h>
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+static struct class *adf_class;
+static int adf_major;
+static DEFINE_IDR(adf_minors);
+
+#define dev_to_adf_interface(p) \
+ adf_obj_to_interface(container_of(p, struct adf_obj, dev))
+
+static ssize_t dpms_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adf_interface *intf = dev_to_adf_interface(dev);
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ adf_interface_dpms_state(intf));
+}
+
+static ssize_t dpms_state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct adf_interface *intf = dev_to_adf_interface(dev);
+ u8 dpms_state;
+ int err;
+
+ err = kstrtou8(buf, 0, &dpms_state);
+ if (err < 0)
+ return err;
+
+ err = adf_interface_blank(intf, dpms_state);
+ if (err < 0)
+ return err;
+
+ return count;
+}
+
+static ssize_t current_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adf_interface *intf = dev_to_adf_interface(dev);
+ struct drm_mode_modeinfo mode;
+
+ adf_interface_current_mode(intf, &mode);
+
+ if (mode.name[0]) {
+ return scnprintf(buf, PAGE_SIZE, "%s\n", mode.name);
+ } else {
+ bool interlaced = !!(mode.flags & DRM_MODE_FLAG_INTERLACE);
+ return scnprintf(buf, PAGE_SIZE, "%ux%u%s\n", mode.hdisplay,
+ mode.vdisplay, interlaced ? "i" : "");
+ }
+}
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adf_interface *intf = dev_to_adf_interface(dev);
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ adf_interface_type_str(intf));
+}
+
+static ssize_t vsync_timestamp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adf_interface *intf = dev_to_adf_interface(dev);
+ ktime_t timestamp;
+ unsigned long flags;
+
+ read_lock_irqsave(&intf->vsync_lock, flags);
+ memcpy(&timestamp, &intf->vsync_timestamp, sizeof(timestamp));
+ read_unlock_irqrestore(&intf->vsync_lock, flags);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", ktime_to_ns(timestamp));
+}
+
+static ssize_t hotplug_detect_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adf_interface *intf = dev_to_adf_interface(dev);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", intf->hotplug_detect);
+}
+
+static struct device_attribute adf_interface_attrs[] = {
+ __ATTR(dpms_state, S_IRUGO|S_IWUSR, dpms_state_show, dpms_state_store),
+ __ATTR_RO(current_mode),
+ __ATTR_RO(hotplug_detect),
+ __ATTR_RO(type),
+ __ATTR_RO(vsync_timestamp),
+};
+
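+/* Allocate a minor for @obj and register its device under the adf class. */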
+int adf_obj_sysfs_init(struct adf_obj *obj, struct device *parent)
+{
+ int ret = idr_alloc(&adf_minors, obj, 0, 0, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("%s: allocating adf minor failed: %d\n", __func__,
+ ret);
+ return ret;
+ }
+
+ obj->minor = ret;
+ obj->dev.parent = parent;
+ obj->dev.class = adf_class;
+ obj->dev.devt = MKDEV(adf_major, obj->minor);
+
+ ret = device_register(&obj->dev);
+ if (ret < 0) {
+ pr_err("%s: registering adf object failed: %d\n", __func__,
+ ret);
+ goto err_device_register;
+ }
+
+ return 0;
+
+err_device_register:
+ idr_remove(&adf_minors, obj->minor);
+ return ret;
+}
+
+static char *adf_device_devnode(struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid)
+{
+ struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+ return kasprintf(GFP_KERNEL, "adf%d", obj->id);
+}
+
+static char *adf_interface_devnode(struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid)
+{
+ struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+ struct adf_interface *intf = adf_obj_to_interface(obj);
+ struct adf_device *parent = adf_interface_parent(intf);
+ return kasprintf(GFP_KERNEL, "adf-interface%d.%d",
+ parent->base.id, intf->base.id);
+}
+
+static char *adf_overlay_engine_devnode(struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid)
+{
+ struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+ struct adf_overlay_engine *eng = adf_obj_to_overlay_engine(obj);
+ struct adf_device *parent = adf_overlay_engine_parent(eng);
+ return kasprintf(GFP_KERNEL, "adf-overlay-engine%d.%d",
+ parent->base.id, eng->base.id);
+}
+
+static void adf_noop_release(struct device *dev)
+{
+}
+
+static struct device_type adf_device_type = {
+ .name = "adf_device",
+ .devnode = adf_device_devnode,
+ .release = adf_noop_release,
+};
+
+static struct device_type adf_interface_type = {
+ .name = "adf_interface",
+ .devnode = adf_interface_devnode,
+ .release = adf_noop_release,
+};
+
+static struct device_type adf_overlay_engine_type = {
+ .name = "adf_overlay_engine",
+ .devnode = adf_overlay_engine_devnode,
+ .release = adf_noop_release,
+};
+
+int adf_device_sysfs_init(struct adf_device *dev)
+{
+ dev->base.dev.type = &adf_device_type;
+ dev_set_name(&dev->base.dev, "%s", dev->base.name);
+ return adf_obj_sysfs_init(&dev->base, dev->dev);
+}
+
+int adf_interface_sysfs_init(struct adf_interface *intf)
+{
+ struct adf_device *parent = adf_interface_parent(intf);
+ size_t i, j;
+ int ret;
+
+ intf->base.dev.type = &adf_interface_type;
+ dev_set_name(&intf->base.dev, "%s-interface%d", parent->base.name,
+ intf->base.id);
+
+ ret = adf_obj_sysfs_init(&intf->base, &parent->base.dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++) {
+ ret = device_create_file(&intf->base.dev,
+ &adf_interface_attrs[i]);
+ if (ret < 0) {
+ dev_err(&intf->base.dev, "creating sysfs attribute %s failed: %d\n",
+ adf_interface_attrs[i].attr.name, ret);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ for (j = 0; j < i; j++)
+ device_remove_file(&intf->base.dev, &adf_interface_attrs[j]);
+ return ret;
+}
+
+int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng)
+{
+ struct adf_device *parent = adf_overlay_engine_parent(eng);
+
+ eng->base.dev.type = &adf_overlay_engine_type;
+ dev_set_name(&eng->base.dev, "%s-overlay-engine%d", parent->base.name,
+ eng->base.id);
+
+ return adf_obj_sysfs_init(&eng->base, &parent->base.dev);
+}
+
+struct adf_obj *adf_obj_sysfs_find(int minor)
+{
+ return idr_find(&adf_minors, minor);
+}
+
+void adf_obj_sysfs_destroy(struct adf_obj *obj)
+{
+ idr_remove(&adf_minors, obj->minor);
+ device_unregister(&obj->dev);
+}
+
+void adf_device_sysfs_destroy(struct adf_device *dev)
+{
+ adf_obj_sysfs_destroy(&dev->base);
+}
+
+void adf_interface_sysfs_destroy(struct adf_interface *intf)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++)
+ device_remove_file(&intf->base.dev, &adf_interface_attrs[i]);
+ adf_obj_sysfs_destroy(&intf->base);
+}
+
+void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng)
+{
+ adf_obj_sysfs_destroy(&eng->base);
+}
+
+int adf_sysfs_init(void)
+{
+ struct class *class;
+ int ret;
+
+ class = class_create(THIS_MODULE, "adf");
+ if (IS_ERR(class)) {
+ ret = PTR_ERR(class);
+ pr_err("%s: creating class failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = register_chrdev(0, "adf", &adf_fops);
+ if (ret < 0) {
+ pr_err("%s: registering device failed: %d\n", __func__, ret);
+ goto err_chrdev;
+ }
+
+ adf_class = class;
+ adf_major = ret;
+ return 0;
+
+err_chrdev:
+ class_destroy(class);
+ return ret;
+}
+
+void adf_sysfs_destroy(void)
+{
+ idr_destroy(&adf_minors);
+ class_destroy(adf_class);
+}
diff --git a/drivers/video/adf/adf_sysfs.h b/drivers/video/adf/adf_sysfs.h
new file mode 100644
index 00000000000..0613ac364f8
--- /dev/null
+++ b/drivers/video/adf/adf_sysfs.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_SYSFS_H
+#define __VIDEO_ADF_ADF_SYSFS_H
+
+struct adf_device;
+struct adf_interface;
+struct adf_overlay_engine;
+
+int adf_device_sysfs_init(struct adf_device *dev);
+void adf_device_sysfs_destroy(struct adf_device *dev);
+int adf_interface_sysfs_init(struct adf_interface *intf);
+void adf_interface_sysfs_destroy(struct adf_interface *intf);
+int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng);
+void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng);
+struct adf_obj *adf_obj_sysfs_find(int minor);
+
+int adf_sysfs_init(void);
+void adf_sysfs_destroy(void);
+
+#endif /* __VIDEO_ADF_ADF_SYSFS_H */
diff --git a/drivers/video/adf/adf_trace.h b/drivers/video/adf/adf_trace.h
new file mode 100644
index 00000000000..3cb2a84d728
--- /dev/null
+++ b/drivers/video/adf/adf_trace.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM adf
+
+#if !defined(__VIDEO_ADF_ADF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __VIDEO_ADF_ADF_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <video/adf.h>
+
+TRACE_EVENT(adf_event,
+ TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+ TP_ARGS(obj, type),
+
+ TP_STRUCT__entry(
+ __string(name, obj->name)
+ __field(enum adf_event_type, type)
+ __array(char, type_str, 32)
+ ),
+ TP_fast_assign(
+ __assign_str(name, obj->name);
+ __entry->type = type;
+ strlcpy(__entry->type_str, adf_event_type_str(obj, type),
+ sizeof(__entry->type_str));
+ ),
+ TP_printk("obj=%s type=%u (%s)",
+ __get_str(name),
+ __entry->type,
+ __entry->type_str)
+);
+
+TRACE_EVENT(adf_event_enable,
+ TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+ TP_ARGS(obj, type),
+
+ TP_STRUCT__entry(
+ __string(name, obj->name)
+ __field(enum adf_event_type, type)
+ __array(char, type_str, 32)
+ ),
+ TP_fast_assign(
+ __assign_str(name, obj->name);
+ __entry->type = type;
+ strlcpy(__entry->type_str, adf_event_type_str(obj, type),
+ sizeof(__entry->type_str));
+ ),
+ TP_printk("obj=%s type=%u (%s)",
+ __get_str(name),
+ __entry->type,
+ __entry->type_str)
+);
+
+TRACE_EVENT(adf_event_disable,
+ TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+ TP_ARGS(obj, type),
+
+ TP_STRUCT__entry(
+ __string(name, obj->name)
+ __field(enum adf_event_type, type)
+ __array(char, type_str, 32)
+ ),
+ TP_fast_assign(
+ __assign_str(name, obj->name);
+ __entry->type = type;
+ strlcpy(__entry->type_str, adf_event_type_str(obj, type),
+ sizeof(__entry->type_str));
+ ),
+ TP_printk("obj=%s type=%u (%s)",
+ __get_str(name),
+ __entry->type,
+ __entry->type_str)
+);
+
+#endif /* __VIDEO_ADF_ADF_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE adf_trace
+#include <trace/define_trace.h>
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index e033491fe30..ab29939ea4c 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -18,6 +18,8 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/ds2482.h>
#include <asm/delay.h>
#include "../w1.h"
@@ -84,7 +86,8 @@ static const u8 ds2482_chan_rd[8] =
static int ds2482_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int ds2482_remove(struct i2c_client *client);
+static int ds2482_suspend(struct device *dev);
+static int ds2482_resume(struct device *dev);
+
/**
* Driver data (common to all clients)
@@ -94,10 +97,16 @@ static const struct i2c_device_id ds2482_id[] = {
{ }
};
+
+static const struct dev_pm_ops ds2482_pm_ops = {
+ .suspend = ds2482_suspend,
+ .resume = ds2482_resume,
+};
+
static struct i2c_driver ds2482_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "ds2482",
+ .pm = &ds2482_pm_ops,
},
.probe = ds2482_probe,
.remove = ds2482_remove,
@@ -119,6 +128,7 @@ struct ds2482_w1_chan {
struct ds2482_data {
struct i2c_client *client;
struct mutex access_lock;
+ int slpz_gpio;
/* 1-wire interface(s) */
int w1_count; /* 1 or 8 */
@@ -444,11 +454,31 @@ static u8 ds2482_w1_set_pullup(void *data, int delay)
return retval;
}
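+/* Drive the SLPZ (sleep, active-low) pin low in suspend, high on resume. */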
+static int ds2482_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds2482_data *data = i2c_get_clientdata(client);
+
+ if (data->slpz_gpio >= 0)
+ gpio_set_value(data->slpz_gpio, 0);
+ return 0;
+}
+
+static int ds2482_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds2482_data *data = i2c_get_clientdata(client);
+
+ if (data->slpz_gpio >= 0)
+ gpio_set_value(data->slpz_gpio, 1);
+ return 0;
+}
+
static int ds2482_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ds2482_data *data;
+ struct ds2482_platform_data *pdata;
int err = -ENODEV;
int temp1;
int idx;
@@ -515,6 +545,16 @@ static int ds2482_probe(struct i2c_client *client,
}
}
+ pdata = client->dev.platform_data;
+ data->slpz_gpio = pdata ? pdata->slpz_gpio : -1;
+
+ if (data->slpz_gpio >= 0) {
+ err = gpio_request_one(data->slpz_gpio, GPIOF_OUT_INIT_HIGH,
+ "ds2482.slpz");
+ if (err < 0)
+ goto exit_w1_remove;
+ }
+
return 0;
exit_w1_remove:
@@ -539,6 +579,11 @@ static int ds2482_remove(struct i2c_client *client)
w1_remove_master_device(&data->w1_ch[idx].w1_bm);
}
+ if (data->slpz_gpio >= 0) {
+ gpio_set_value(data->slpz_gpio, 0);
+ gpio_free(data->slpz_gpio);
+ }
+
/* Free the memory */
kfree(data);
return 0;