Diffstat (limited to 'drivers')
 drivers/bus/Kconfig                       |   7
 drivers/bus/Makefile                      |   2
 drivers/bus/arm-cci.c                     | 947
 drivers/clk/Kconfig                       |   2
 drivers/clk/versatile/clk-vexpress-osc.c  |   4
 drivers/cpuidle/Makefile                  |   2
 drivers/cpuidle/arm_big_little.c          | 183
 drivers/cpuidle/cpuidle-calxeda.c         |  14
 drivers/irqchip/irq-gic.c                 |   6
 drivers/mfd/Kconfig                       |  10
 drivers/mfd/Makefile                      |   1
 drivers/mfd/vexpress-config.c             |  61
 drivers/mfd/vexpress-spc.c                | 633
 drivers/mfd/vexpress-sysreg.c             |  12
 drivers/net/ethernet/smsc/smc91x.c        |   2
 drivers/power/reset/Kconfig               |   3
 drivers/video/Kconfig                     |  20
 drivers/video/Makefile                    |   4
 drivers/video/amba-clcd.c                 | 280
 drivers/video/arm-hdlcd.c                 | 844
 drivers/video/vexpress-dvi.c              | 220
 21 files changed, 3209 insertions(+), 48 deletions(-)
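The bulk of this diff is the new CCI-400 bus driver, which exports a small port-control API (cci_probed(), cci_ace_get_port(), cci_disable_port_by_cpu(), cci_enable_port_for_self()) for cluster power-management code. As a rough sketch of the intended usage -- the function below and its calling conditions are illustrative assumptions, not part of this patch -- a last-man power-down path would use it along these lines:

#include <linux/arm-cci.h>
#include <asm/cputype.h>

/* Hypothetical last-man cluster power-down step (not part of this patch). */
static void example_cluster_powerdown(void)
{
	u64 mpidr = read_cpuid_mpidr();

	/* The CCI driver probes from an early_initcall; bail out if it failed. */
	if (!cci_probed())
		return;

	/*
	 * The caller must guarantee this is the last running CPU in the
	 * cluster, with caches cleaned and the C bit already cleared, since
	 * cci_disable_port_by_cpu() takes no locks and must not allocate
	 * into the cache.
	 */
	cci_disable_port_by_cpu(mpidr);

	/*
	 * The matching re-enable on the inbound path is
	 * cci_enable_port_for_self(), called from the low-level (MMU off)
	 * entry code before any cacheable access is made.
	 */
}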
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index b05ecab915c..5286e2d333b 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -26,4 +26,11 @@ config OMAP_INTERCONNECT help Driver to enable OMAP interconnect error handling driver. + +config ARM_CCI + bool "ARM CCI driver support" + depends on ARM + help + Driver supporting the CCI cache coherent interconnect for ARM + platforms. endmenu diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index 3c7b53c1209..670cea44380 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -7,3 +7,5 @@ obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o # Interconnect bus driver for OMAP SoCs. obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o +# CCI cache coherent interconnect for ARM platforms +obj-$(CONFIG_ARM_CCI) += arm-cci.o diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c new file mode 100644 index 00000000000..ed284ab0041 --- /dev/null +++ b/drivers/bus/arm-cci.c @@ -0,0 +1,947 @@ +/* + * CCI cache coherent interconnect driver + * + * Copyright (C) 2013 ARM Ltd. + * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/arm-cci.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of_address.h> +#include <linux/slab.h> + +#include <asm/cacheflush.h> +#include <asm/irq_regs.h> +#include <asm/pmu.h> +#include <asm/smp_plat.h> + +#define DRIVER_NAME "CCI" + +#define CCI_PORT_CTRL 0x0 +#define CCI_CTRL_STATUS 0xc + +#define CCI_ENABLE_SNOOP_REQ 0x1 +#define CCI_ENABLE_DVM_REQ 0x2 +#define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ) + +struct cci_nb_ports { + unsigned int nb_ace; + unsigned int nb_ace_lite; +}; + +enum cci_ace_port_type { + ACE_INVALID_PORT = 0x0, + ACE_PORT, + ACE_LITE_PORT, +}; + +struct cci_ace_port { + void __iomem *base; + unsigned long phys; + enum cci_ace_port_type type; + struct device_node *dn; +}; + +static struct cci_ace_port *ports; +static unsigned int nb_cci_ports; + +static void __iomem *cci_ctrl_base; +static unsigned long cci_ctrl_phys; + +#ifdef CONFIG_HW_PERF_EVENTS + +static void __iomem *cci_pmu_base; + +#define CCI400_PMCR 0x0100 + +#define CCI400_PMU_CYCLE_CNTR_BASE 0x0000 +#define CCI400_PMU_CNTR_BASE(idx) (CCI400_PMU_CYCLE_CNTR_BASE + (idx) * 0x1000) + +#define CCI400_PMCR_CEN 0x00000001 +#define CCI400_PMCR_RST 0x00000002 +#define CCI400_PMCR_CCR 0x00000004 +#define CCI400_PMCR_CCD 0x00000008 +#define CCI400_PMCR_EX 0x00000010 +#define CCI400_PMCR_DP 0x00000020 +#define CCI400_PMCR_NCNT_MASK 0x0000F800 +#define CCI400_PMCR_NCNT_SHIFT 11 + +#define CCI400_PMU_EVT_SEL 0x000 +#define CCI400_PMU_CNTR 0x004 +#define CCI400_PMU_CNTR_CTRL 0x008 +#define CCI400_PMU_OVERFLOW 0x00C + +#define CCI400_PMU_OVERFLOW_FLAG 1 + +enum cci400_perf_events { + CCI400_PMU_CYCLES = 0xFF +}; + +#define CCI400_PMU_EVENT_MASK 0xff +#define CCI400_PMU_EVENT_SOURCE(event) ((event >> 5) & 0x7) +#define CCI400_PMU_EVENT_CODE(event) (event & 0x1f) + +#define CCI400_PMU_EVENT_SOURCE_S0 0 +#define CCI400_PMU_EVENT_SOURCE_S4 4 +#define CCI400_PMU_EVENT_SOURCE_M0 
5 +#define CCI400_PMU_EVENT_SOURCE_M2 7 + +#define CCI400_PMU_EVENT_SLAVE_MIN 0x0 +#define CCI400_PMU_EVENT_SLAVE_MAX 0x13 + +#define CCI400_PMU_EVENT_MASTER_MIN 0x14 +#define CCI400_PMU_EVENT_MASTER_MAX 0x1A + +#define CCI400_PMU_MAX_HW_EVENTS 5 /* CCI PMU has 4 counters + 1 cycle counter */ + +#define CCI400_PMU_CYCLE_COUNTER_IDX 0 +#define CCI400_PMU_COUNTER0_IDX 1 +#define CCI400_PMU_COUNTER_LAST(cci_pmu) (CCI400_PMU_CYCLE_COUNTER_IDX + cci_pmu->num_events - 1) + + +static struct perf_event *events[CCI400_PMU_MAX_HW_EVENTS]; +static unsigned long used_mask[BITS_TO_LONGS(CCI400_PMU_MAX_HW_EVENTS)]; +static struct pmu_hw_events cci_hw_events = { + .events = events, + .used_mask = used_mask, +}; + +static int cci_pmu_validate_hw_event(u8 hw_event) +{ + u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event); + u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event); + + if (ev_source <= CCI400_PMU_EVENT_SOURCE_S4 && + ev_code <= CCI400_PMU_EVENT_SLAVE_MAX) + return hw_event; + else if (CCI400_PMU_EVENT_SOURCE_M0 <= ev_source && + ev_source <= CCI400_PMU_EVENT_SOURCE_M2 && + CCI400_PMU_EVENT_MASTER_MIN <= ev_code && + ev_code <= CCI400_PMU_EVENT_MASTER_MAX) + return hw_event; + + return -EINVAL; +} + +static inline int cci_pmu_counter_is_valid(struct arm_pmu *cci_pmu, int idx) +{ + return CCI400_PMU_CYCLE_COUNTER_IDX <= idx && + idx <= CCI400_PMU_COUNTER_LAST(cci_pmu); +} + +static inline u32 cci_pmu_read_register(int idx, unsigned int offset) +{ + return readl_relaxed(cci_pmu_base + CCI400_PMU_CNTR_BASE(idx) + offset); +} + +static inline void cci_pmu_write_register(u32 value, int idx, unsigned int offset) +{ + return writel_relaxed(value, cci_pmu_base + CCI400_PMU_CNTR_BASE(idx) + offset); +} + +static inline void cci_pmu_disable_counter(int idx) +{ + cci_pmu_write_register(0, idx, CCI400_PMU_CNTR_CTRL); +} + +static inline void cci_pmu_enable_counter(int idx) +{ + cci_pmu_write_register(1, idx, CCI400_PMU_CNTR_CTRL); +} + +static inline void cci_pmu_select_event(int idx, unsigned long event) +{ + event &= CCI400_PMU_EVENT_MASK; + cci_pmu_write_register(event, idx, CCI400_PMU_EVT_SEL); +} + +static u32 cci_pmu_get_max_counters(void) +{ + u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI400_PMCR) & + CCI400_PMCR_NCNT_MASK) >> CCI400_PMCR_NCNT_SHIFT; + + /* add 1 for cycle counter */ + return n_cnts + 1; +} + +static struct pmu_hw_events *cci_pmu_get_hw_events(void) +{ + return &cci_hw_events; +} + +static int cci_pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event) +{ + struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hw_event = &event->hw; + unsigned long cci_event = hw_event->config_base & CCI400_PMU_EVENT_MASK; + int idx; + + if (cci_event == CCI400_PMU_CYCLES) { + if (test_and_set_bit(CCI400_PMU_CYCLE_COUNTER_IDX, hw->used_mask)) + return -EAGAIN; + + return CCI400_PMU_CYCLE_COUNTER_IDX; + } + + for (idx = CCI400_PMU_COUNTER0_IDX; idx <= CCI400_PMU_COUNTER_LAST(cci_pmu); ++idx) { + if (!test_and_set_bit(idx, hw->used_mask)) + return idx; + } + + /* No counters available */ + return -EAGAIN; +} + +static int cci_pmu_map_event(struct perf_event *event) +{ + int mapping; + u8 config = event->attr.config & CCI400_PMU_EVENT_MASK; + + if (event->attr.type < PERF_TYPE_MAX) + return -ENOENT; + + /* 0xff is used to represent CCI Cycles */ + if (config == 0xff) + mapping = config; + else + mapping = cci_pmu_validate_hw_event(config); + + return mapping; +} + +static int cci_pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler) +{ + int irq, err, i = 0; + struct 
platform_device *pmu_device = cci_pmu->plat_device; + + if (unlikely(!pmu_device)) + return -ENODEV; + + /* CCI exports 6 interrupts - 1 nERRORIRQ + 5 nEVNTCNTOVERFLOW (PMU) + nERRORIRQ will be handled by secure firmware on TC2. So we + assume that all CCI interrupts listed in the linux device + tree are PMU interrupts. + + The following code should then be able to handle different routing + of the CCI PMU interrupts. + */ + while ((irq = platform_get_irq(pmu_device, i)) > 0) { + err = request_irq(irq, handler, 0, "arm-cci-pmu", cci_pmu); + if (err) { + dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n", + irq); + return err; + } + i++; + } + + return 0; +} + +static irqreturn_t cci_pmu_handle_irq(int irq_num, void *dev) +{ + struct arm_pmu *cci_pmu = (struct arm_pmu *)dev; + struct pmu_hw_events *events = cci_pmu->get_hw_events(); + struct perf_sample_data data; + struct pt_regs *regs; + int idx; + + regs = get_irq_regs(); + + /* Iterate over counters and update the corresponding perf events. + This should work regardless of whether we have per-counter overflow + interrupt or a combined overflow interrupt. */ + for (idx = CCI400_PMU_CYCLE_COUNTER_IDX; idx <= CCI400_PMU_COUNTER_LAST(cci_pmu); idx++) { + struct perf_event *event = events->events[idx]; + struct hw_perf_event *hw_counter; + + if (!event) + continue; + + hw_counter = &event->hw; + + /* Did this counter overflow? */ + if (!(cci_pmu_read_register(idx, CCI400_PMU_OVERFLOW) & CCI400_PMU_OVERFLOW_FLAG)) + continue; + cci_pmu_write_register(CCI400_PMU_OVERFLOW_FLAG, idx, CCI400_PMU_OVERFLOW); + + armpmu_event_update(event); + perf_sample_data_init(&data, 0, hw_counter->last_period); + if (!armpmu_event_set_period(event)) + continue; + + if (perf_event_overflow(event, &data, regs)) + cci_pmu->disable(event); + } + + irq_work_run(); + return IRQ_HANDLED; +} + +static void cci_pmu_free_irq(struct arm_pmu *cci_pmu) +{ + int irq, i = 0; + struct platform_device *pmu_device = cci_pmu->plat_device; + + while ((irq = platform_get_irq(pmu_device, i)) > 0) { + free_irq(irq, cci_pmu); + i++; + } +} + +static void cci_pmu_enable_event(struct perf_event *event) +{ + unsigned long flags; + struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); + struct pmu_hw_events *events = cci_pmu->get_hw_events(); + struct hw_perf_event *hw_counter = &event->hw; + int idx = hw_counter->idx; + + if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx))) { + dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); + return; + } + + raw_spin_lock_irqsave(&events->pmu_lock, flags); + + /* Configure the event to count, unless you are counting cycles */ + if (idx != CCI400_PMU_CYCLE_COUNTER_IDX) + cci_pmu_select_event(idx, hw_counter->config_base); + + cci_pmu_enable_counter(idx); + + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); +} + +static void cci_pmu_disable_event(struct perf_event *event) +{ + unsigned long flags; + struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); + struct pmu_hw_events *events = cci_pmu->get_hw_events(); + struct hw_perf_event *hw_counter = &event->hw; + int idx = hw_counter->idx; + + if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx))) { + dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); + return; + } + + raw_spin_lock_irqsave(&events->pmu_lock, flags); + + cci_pmu_disable_counter(idx); + + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); +} + +static void cci_pmu_start(struct arm_pmu *cci_pmu) +{ + u32 val; + unsigned long flags; + struct pmu_hw_events *events = 
cci_pmu->get_hw_events(); + + raw_spin_lock_irqsave(&events->pmu_lock, flags); + + /* Enable all the PMU counters. */ + val = readl(cci_ctrl_base + CCI400_PMCR) | CCI400_PMCR_CEN; + writel(val, cci_ctrl_base + CCI400_PMCR); + + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); +} + +static void cci_pmu_stop(struct arm_pmu *cci_pmu) +{ + u32 val; + unsigned long flags; + struct pmu_hw_events *events = cci_pmu->get_hw_events(); + + raw_spin_lock_irqsave(&events->pmu_lock, flags); + + /* Disable all the PMU counters. */ + val = readl(cci_ctrl_base + CCI400_PMCR) & ~CCI400_PMCR_CEN; + writel(val, cci_ctrl_base + CCI400_PMCR); + + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); +} + +static u32 cci_pmu_read_counter(struct perf_event *event) +{ + struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hw_counter = &event->hw; + int idx = hw_counter->idx; + u32 value; + + if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx))) { + dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); + return 0; + } + value = cci_pmu_read_register(idx, CCI400_PMU_CNTR); + + return value; +} + +static void cci_pmu_write_counter(struct perf_event *event, u32 value) +{ + struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hw_counter = &event->hw; + int idx = hw_counter->idx; + + if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx))) + dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); + else + cci_pmu_write_register(value, idx, CCI400_PMU_CNTR); +} + +static struct arm_pmu cci_pmu = { + .name = DRIVER_NAME, + .max_period = (1LLU << 32) - 1, + .get_hw_events = cci_pmu_get_hw_events, + .get_event_idx = cci_pmu_get_event_idx, + .map_event = cci_pmu_map_event, + .request_irq = cci_pmu_request_irq, + .handle_irq = cci_pmu_handle_irq, + .free_irq = cci_pmu_free_irq, + .enable = cci_pmu_enable_event, + .disable = cci_pmu_disable_event, + .start = cci_pmu_start, + .stop = cci_pmu_stop, + .read_counter = cci_pmu_read_counter, + .write_counter = cci_pmu_write_counter, +}; + +static int cci_pmu_probe(struct platform_device *pdev) +{ + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + cci_pmu_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(cci_pmu_base)) + return PTR_ERR(cci_pmu_base); + + cci_pmu.plat_device = pdev; + cci_pmu.num_events = cci_pmu_get_max_counters(); + raw_spin_lock_init(&cci_hw_events.pmu_lock); + cpumask_setall(&cci_pmu.valid_cpus); + + return armpmu_register(&cci_pmu, -1); +} + +static const struct of_device_id arm_cci_pmu_matches[] = { + {.compatible = "arm,cci-400-pmu"}, + {}, +}; + +static struct platform_driver cci_pmu_platform_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = arm_cci_pmu_matches, + }, + .probe = cci_pmu_probe, +}; + +static int __init cci_pmu_init(void) +{ + if (platform_driver_register(&cci_pmu_platform_driver)) + WARN(1, "unable to register CCI platform driver\n"); + return 0; +} + +#else + +static int __init cci_pmu_init(void) +{ + return 0; +} + +static void cci_pmu_destroy(void) { } + +#endif /* CONFIG_HW_PERF_EVENTS */ + +struct cpu_port { + u64 mpidr; + u32 port; +}; + +/* + * Use the port MSB as valid flag, shift can be made dynamic + * by computing number of bits required for port indexes. 
+ * Code disabling CCI cpu ports runs with D-cache invalidated + * and SCTLR bit clear so data accesses must be kept to a minimum + * to improve performance; for now shift is left static to + * avoid one more data access while disabling the CCI port. + */ +#define PORT_VALID_SHIFT 31 +#define PORT_VALID (0x1 << PORT_VALID_SHIFT) + +static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr) +{ + port->port = PORT_VALID | index; + port->mpidr = mpidr; +} + +static inline bool cpu_port_is_valid(struct cpu_port *port) +{ + return !!(port->port & PORT_VALID); +} + +static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr) +{ + return port->mpidr == (mpidr & MPIDR_HWID_BITMASK); +} + +static struct cpu_port cpu_port[NR_CPUS]; + +/** + * __cci_ace_get_port - Function to retrieve the port index connected to + * a cpu or device. + * + * @dn: device node of the device to look-up + * @type: port type + * + * Return value: + * - CCI port index if success + * - -ENODEV if failure + */ +static int __cci_ace_get_port(struct device_node *dn, int type) +{ + int i; + bool ace_match; + struct device_node *cci_portn; + + cci_portn = of_parse_phandle(dn, "cci-control-port", 0); + for (i = 0; i < nb_cci_ports; i++) { + ace_match = ports[i].type == type; + if (ace_match && cci_portn == ports[i].dn) + return i; + } + return -ENODEV; +} + +int cci_ace_get_port(struct device_node *dn) +{ + return __cci_ace_get_port(dn, ACE_LITE_PORT); +} +EXPORT_SYMBOL_GPL(cci_ace_get_port); + +static void __init cci_ace_init_ports(void) +{ + int port, ac, cpu; + u64 hwid; + const u32 *cell; + struct device_node *cpun, *cpus; + + cpus = of_find_node_by_path("/cpus"); + if (WARN(!cpus, "Missing cpus node, bailing out\n")) + return; + + if (WARN_ON(of_property_read_u32(cpus, "#address-cells", &ac))) + ac = of_n_addr_cells(cpus); + + /* + * Port index look-up speeds up the function disabling ports by CPU, + * since the logical to port index mapping is done once and does + * not change after system boot. + * The stashed index array is initialized for all possible CPUs + * at probe time. + */ + for_each_child_of_node(cpus, cpun) { + if (of_node_cmp(cpun->type, "cpu")) + continue; + cell = of_get_property(cpun, "reg", NULL); + if (WARN(!cell, "%s: missing reg property\n", cpun->full_name)) + continue; + + hwid = of_read_number(cell, ac); + cpu = get_logical_index(hwid & MPIDR_HWID_BITMASK); + + if (cpu < 0 || !cpu_possible(cpu)) + continue; + port = __cci_ace_get_port(cpun, ACE_PORT); + if (port < 0) + continue; + + init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu)); + } + + for_each_possible_cpu(cpu) { + WARN(!cpu_port_is_valid(&cpu_port[cpu]), + "CPU %u does not have an associated CCI port\n", + cpu); + } +} +/* + * Functions to enable/disable a CCI interconnect slave port + * + * They are called by low-level power management code to disable slave + * interfaces snoops and DVM broadcast. + * Since they may execute with cache data allocation disabled and + * after the caches have been cleaned and invalidated the functions provide + * no explicit locking since they may run with D-cache disabled, so normal + * cacheable kernel locks based on ldrex/strex may not work. + * Locking has to be provided by BSP implementations to ensure proper + * operations. 
+ */ + +/** + * cci_port_control() - function to control a CCI port + * + * @port: index of the port to setup + * @enable: if true enables the port, if false disables it + */ +static void notrace cci_port_control(unsigned int port, bool enable) +{ + void __iomem *base = ports[port].base; + + writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL); + /* + * This function is called from power down procedures + * and must not execute any instruction that might + * cause the processor to be put in a quiescent state + * (eg wfi). Hence, cpu_relax() can not be added to this + * read loop to optimize power, since it might hide possibly + * disruptive operations. + */ + while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1) + ; +} + +/** + * cci_disable_port_by_cpu() - function to disable a CCI port by CPU + * reference + * + * @mpidr: mpidr of the CPU whose CCI port should be disabled + * + * Disabling a CCI port for a CPU implies disabling the CCI port + * controlling that CPU cluster. Code disabling CPU CCI ports + * must make sure that the CPU running the code is the last active CPU + * in the cluster ie all other CPUs are quiescent in a low power state. + * + * Return: + * 0 on success + * -ENODEV on port look-up failure + */ +int notrace cci_disable_port_by_cpu(u64 mpidr) +{ + int cpu; + bool is_valid; + for (cpu = 0; cpu < nr_cpu_ids; cpu++) { + is_valid = cpu_port_is_valid(&cpu_port[cpu]); + if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) { + cci_port_control(cpu_port[cpu].port, false); + return 0; + } + } + return -ENODEV; +} +EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu); + +/** + * cci_enable_port_for_self() - enable a CCI port for calling CPU + * + * Enabling a CCI port for the calling CPU implies enabling the CCI + * port controlling that CPU's cluster. Caller must make sure that the + * CPU running the code is the first active CPU in the cluster and all + * other CPUs are quiescent in a low power state or waiting for this CPU + * to complete the CCI initialization. + * + * Because this is called when the MMU is still off and with no stack, + * the code must be position independent and ideally rely on callee + * clobbered registers only. To achieve this we must code this function + * entirely in assembler. + * + * On success this returns with the proper CCI port enabled. In case of + * any failure this never returns as the inability to enable the CCI is + * fatal and there is no possible recovery at this stage. + */ +asmlinkage void __naked cci_enable_port_for_self(void) +{ + asm volatile ("\n" + +" mrc p15, 0, r0, c0, c0, 5 @ get MPIDR value \n" +" and r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n" +" adr r1, 5f \n" +" ldr r2, [r1] \n" +" add r1, r1, r2 @ &cpu_port \n" +" add ip, r1, %[sizeof_cpu_port] \n" + + /* Loop over the cpu_port array looking for a matching MPIDR */ +"1: ldr r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n" +" cmp r2, r0 @ compare MPIDR \n" +" bne 2f \n" + + /* Found a match, now test port validity */ +" ldr r3, [r1, %[offsetof_cpu_port_port]] \n" +" tst r3, #"__stringify(PORT_VALID)" \n" +" bne 3f \n" + + /* no match, loop with the next cpu_port entry */ +"2: add r1, r1, %[sizeof_struct_cpu_port] \n" +" cmp r1, ip @ done? 
\n" +" blo 1b \n" + + /* CCI port not found -- cheaply try to stall this CPU */ +"cci_port_not_found: \n" +" wfi \n" +" wfe \n" +" b cci_port_not_found \n" + + /* Use matched port index to look up the corresponding ports entry */ +"3: bic r3, r3, #"__stringify(PORT_VALID)" \n" +" adr r0, 6f \n" +" ldmia r0, {r1, r2} \n" +" sub r1, r1, r0 @ virt - phys \n" +" ldr r0, [r0, r2] @ *(&ports) \n" +" mov r2, %[sizeof_struct_ace_port] \n" +" mla r0, r2, r3, r0 @ &ports[index] \n" +" sub r0, r0, r1 @ virt_to_phys() \n" + + /* Enable the CCI port */ +" ldr r0, [r0, %[offsetof_port_phys]] \n" +" mov r3, #"__stringify(CCI_ENABLE_REQ)" \n" +" str r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n" + + /* poll the status reg for completion */ +" adr r1, 7f \n" +" ldr r0, [r1] \n" +" ldr r0, [r0, r1] @ cci_ctrl_base \n" +"4: ldr r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n" +" tst r1, #1 \n" +" bne 4b \n" + +" mov r0, #0 \n" +" bx lr \n" + +" .align 2 \n" +"5: .word cpu_port - . \n" +"6: .word . \n" +" .word ports - 6b \n" +"7: .word cci_ctrl_phys - . \n" + : : + [sizeof_cpu_port] "i" (sizeof(cpu_port)), +#ifndef __ARMEB__ + [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)), +#else + [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4), +#endif + [offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)), + [sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)), + [sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)), + [offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) ); + + unreachable(); +} + +/** + * __cci_control_port_by_device() - function to control a CCI port by device + * reference + * + * @dn: device node pointer of the device whose CCI port should be + * controlled + * @enable: if true enables the port, if false disables it + * + * Return: + * 0 on success + * -ENODEV on port look-up failure + */ +int notrace __cci_control_port_by_device(struct device_node *dn, bool enable) +{ + int port; + + if (!dn) + return -ENODEV; + + port = __cci_ace_get_port(dn, ACE_LITE_PORT); + if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n", + dn->full_name)) + return -ENODEV; + cci_port_control(port, enable); + return 0; +} +EXPORT_SYMBOL_GPL(__cci_control_port_by_device); + +/** + * __cci_control_port_by_index() - function to control a CCI port by port index + * + * @port: port index previously retrieved with cci_ace_get_port() + * @enable: if true enables the port, if false disables it + * + * Return: + * 0 on success + * -ENODEV on port index out of range + * -EPERM if operation carried out on an ACE PORT + */ +int notrace __cci_control_port_by_index(u32 port, bool enable) +{ + if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT) + return -ENODEV; + /* + * CCI control for ports connected to CPUS is extremely fragile + * and must be made to go through a specific and controlled + * interface (ie cci_disable_port_by_cpu(); control by general purpose + * indexing is therefore disabled for ACE ports. 
+ */ + if (ports[port].type == ACE_PORT) + return -EPERM; + + cci_port_control(port, enable); + return 0; +} +EXPORT_SYMBOL_GPL(__cci_control_port_by_index); + +static const struct cci_nb_ports cci400_ports = { + .nb_ace = 2, + .nb_ace_lite = 3 +}; + +static const struct of_device_id arm_cci_matches[] = { + {.compatible = "arm,cci-400", .data = &cci400_ports }, + {}, +}; + +static const struct of_device_id arm_cci_ctrl_if_matches[] = { + {.compatible = "arm,cci-400-ctrl-if", }, + {}, +}; + +static int __init cci_probe(void) +{ + struct cci_nb_ports const *cci_config; + int ret, i, nb_ace = 0, nb_ace_lite = 0; + struct device_node *np, *cp; + struct resource res; + const char *match_str; + bool is_ace; + + np = of_find_matching_node(NULL, arm_cci_matches); + if (!np) + return -ENODEV; + + cci_config = of_match_node(arm_cci_matches, np)->data; + if (!cci_config) + return -ENODEV; + + nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite; + + ports = kcalloc(sizeof(*ports), nb_cci_ports, GFP_KERNEL); + if (!ports) + return -ENOMEM; + + ret = of_address_to_resource(np, 0, &res); + if (!ret) { + cci_ctrl_base = ioremap(res.start, resource_size(&res)); + cci_ctrl_phys = res.start; + } + if (ret || !cci_ctrl_base) { + WARN(1, "unable to ioremap CCI ctrl\n"); + ret = -ENXIO; + goto memalloc_err; + } + + for_each_child_of_node(np, cp) { + if (!of_match_node(arm_cci_ctrl_if_matches, cp)) + continue; + + i = nb_ace + nb_ace_lite; + + if (i >= nb_cci_ports) + break; + + if (of_property_read_string(cp, "interface-type", + &match_str)) { + WARN(1, "node %s missing interface-type property\n", + cp->full_name); + continue; + } + is_ace = strcmp(match_str, "ace") == 0; + if (!is_ace && strcmp(match_str, "ace-lite")) { + WARN(1, "node %s containing invalid interface-type property, skipping it\n", + cp->full_name); + continue; + } + + ret = of_address_to_resource(cp, 0, &res); + if (!ret) { + ports[i].base = ioremap(res.start, resource_size(&res)); + ports[i].phys = res.start; + } + if (ret || !ports[i].base) { + WARN(1, "unable to ioremap CCI port %d\n", i); + continue; + } + + if (is_ace) { + if (WARN_ON(nb_ace >= cci_config->nb_ace)) + continue; + ports[i].type = ACE_PORT; + ++nb_ace; + } else { + if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite)) + continue; + ports[i].type = ACE_LITE_PORT; + ++nb_ace_lite; + } + ports[i].dn = cp; + } + + /* initialize a stashed array of ACE ports to speed-up look-up */ + cci_ace_init_ports(); + + /* + * Multi-cluster systems may need this data when non-coherent, during + * cluster power-up/power-down. Make sure it reaches main memory. + */ + sync_cache_w(&cci_ctrl_base); + sync_cache_w(&cci_ctrl_phys); + sync_cache_w(&ports); + sync_cache_w(&cpu_port); + __sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports); + pr_info("ARM CCI driver probed\n"); + return 0; + +memalloc_err: + + kfree(ports); + return ret; +} + +static int cci_init_status = -EAGAIN; +static DEFINE_MUTEX(cci_probing); + +static int __init cci_init(void) +{ + if (cci_init_status != -EAGAIN) + return cci_init_status; + + mutex_lock(&cci_probing); + if (cci_init_status == -EAGAIN) + cci_init_status = cci_probe(); + mutex_unlock(&cci_probing); + return cci_init_status; +} + +/* + * To sort out early init calls ordering a helper function is provided to + * check if the CCI driver has beed initialized. Function check if the driver + * has been initialized, if not it calls the init function that probes + * the driver and updates the return value. 
+ */ +bool __init cci_probed(void) +{ + return cci_init() == 0; +} +EXPORT_SYMBOL_GPL(cci_probed); + +early_initcall(cci_init); +core_initcall(cci_pmu_init); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ARM CCI support"); diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 0357ac44638..d0d9b212475 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -42,7 +42,7 @@ config COMMON_CLK_WM831X config COMMON_CLK_VERSATILE bool "Clock driver for ARM Reference designs" - depends on ARCH_INTEGRATOR || ARCH_REALVIEW || ARCH_VEXPRESS + depends on ARCH_INTEGRATOR || ARCH_REALVIEW || ARCH_VEXPRESS || ARM64 ---help--- Supports clocking on ARM Reference designs: - Integrator/AP and Integrator/CP diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c index 256c8be74df..2dc8b41a339 100644 --- a/drivers/clk/versatile/clk-vexpress-osc.c +++ b/drivers/clk/versatile/clk-vexpress-osc.c @@ -107,7 +107,7 @@ void __init vexpress_osc_of_setup(struct device_node *node) osc->func = vexpress_config_func_get_by_node(node); if (!osc->func) { pr_err("Failed to obtain config func for node '%s'!\n", - node->name); + node->full_name); goto error; } @@ -119,7 +119,7 @@ void __init vexpress_osc_of_setup(struct device_node *node) of_property_read_string(node, "clock-output-names", &init.name); if (!init.name) - init.name = node->name; + init.name = node->full_name; init.ops = &vexpress_osc_ops; init.flags = CLK_IS_ROOT; diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index 0d8bd55e776..7d8256a5ea9 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile @@ -4,6 +4,6 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o - +obj-$(CONFIG_BIG_LITTLE) += arm_big_little.o obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o diff --git a/drivers/cpuidle/arm_big_little.c b/drivers/cpuidle/arm_big_little.c new file mode 100644 index 00000000000..e5378896a8c --- /dev/null +++ b/drivers/cpuidle/arm_big_little.c @@ -0,0 +1,183 @@ +/* + * big.LITTLE CPU idle driver. + * + * Copyright (C) 2012 ARM Ltd. + * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/arm-cci.h> +#include <linux/bitmap.h> +#include <linux/cpuidle.h> +#include <linux/cpu_pm.h> +#include <linux/clockchips.h> +#include <linux/debugfs.h> +#include <linux/hrtimer.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/tick.h> +#include <linux/vexpress.h> +#include <asm/mcpm.h> +#include <asm/cpuidle.h> +#include <asm/cputype.h> +#include <asm/idmap.h> +#include <asm/proc-fns.h> +#include <asm/suspend.h> +#include <linux/of.h> + +static int bl_cpuidle_simple_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + ktime_t time_start, time_end; + s64 diff; + + time_start = ktime_get(); + + cpu_do_idle(); + + time_end = ktime_get(); + + local_irq_enable(); + + diff = ktime_to_us(ktime_sub(time_end, time_start)); + if (diff > INT_MAX) + diff = INT_MAX; + + dev->last_residency = (int) diff; + + return index; +} + +static int bl_enter_powerdown(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int idx); + +static struct cpuidle_state bl_cpuidle_set[] __initdata = { + [0] = { + .enter = bl_cpuidle_simple_enter, + .exit_latency = 1, + .target_residency = 1, + .power_usage = UINT_MAX, + .flags = CPUIDLE_FLAG_TIME_VALID, + .name = "WFI", + .desc = "ARM WFI", + }, + [1] = { + .enter = bl_enter_powerdown, + .exit_latency = 300, + .target_residency = 1000, + .flags = CPUIDLE_FLAG_TIME_VALID, + .name = "C1", + .desc = "ARM power down", + }, +}; + +struct cpuidle_driver bl_idle_driver = { + .name = "bl_idle", + .owner = THIS_MODULE, + .safe_state_index = 0 +}; + +static DEFINE_PER_CPU(struct cpuidle_device, bl_idle_dev); + +static int notrace bl_powerdown_finisher(unsigned long arg) +{ + unsigned int mpidr = read_cpuid_mpidr(); + unsigned int cluster = (mpidr >> 8) & 0xf; + unsigned int cpu = mpidr & 0xf; + + mcpm_set_entry_vector(cpu, cluster, cpu_resume); + mcpm_cpu_suspend(0); /* 0 should be replaced with better value here */ + return 1; +} + +/* + * bl_enter_powerdown - Programs CPU to enter the specified state + * @dev: cpuidle device + * @drv: The target state to be programmed + * @idx: state index + * + * Called from the CPUidle framework to program the device to the + * specified target state selected by the governor. + */ +static int bl_enter_powerdown(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int idx) +{ + struct timespec ts_preidle, ts_postidle, ts_idle; + int ret; + + /* Used to keep track of the total time in idle */ + getnstimeofday(&ts_preidle); + + BUG_ON(!irqs_disabled()); + + cpu_pm_enter(); + + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu); + + ret = cpu_suspend((unsigned long) dev, bl_powerdown_finisher); + if (ret) + BUG(); + + mcpm_cpu_powered_up(); + + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); + + cpu_pm_exit(); + + getnstimeofday(&ts_postidle); + local_irq_enable(); + ts_idle = timespec_sub(ts_postidle, ts_preidle); + + dev->last_residency = ts_idle.tv_nsec / NSEC_PER_USEC + + ts_idle.tv_sec * USEC_PER_SEC; + return idx; +} + +/* + * bl_idle_init + * + * Registers the bl specific cpuidle driver with the cpuidle + * framework with the valid set of states. 
+ */ +int __init bl_idle_init(void) +{ + struct cpuidle_device *dev; + int i, cpu_id; + struct cpuidle_driver *drv = &bl_idle_driver; + + if (!of_find_compatible_node(NULL, NULL, "arm,generic")) { + pr_info("%s: No compatible node found\n", __func__); + return -ENODEV; + } + + drv->state_count = (sizeof(bl_cpuidle_set) / + sizeof(struct cpuidle_state)); + + for (i = 0; i < drv->state_count; i++) { + memcpy(&drv->states[i], &bl_cpuidle_set[i], + sizeof(struct cpuidle_state)); + } + + cpuidle_register_driver(drv); + + for_each_cpu(cpu_id, cpu_online_mask) { + pr_err("CPUidle for CPU%d registered\n", cpu_id); + dev = &per_cpu(bl_idle_dev, cpu_id); + dev->cpu = cpu_id; + + dev->state_count = drv->state_count; + + if (cpuidle_register_device(dev)) { + printk(KERN_ERR "%s: Cpuidle register device failed\n", + __func__); + return -EIO; + } + } + + return 0; +} + +device_initcall(bl_idle_init); diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c index 223379169cb..0e6e408c0a6 100644 --- a/drivers/cpuidle/cpuidle-calxeda.c +++ b/drivers/cpuidle/cpuidle-calxeda.c @@ -37,20 +37,6 @@ extern void highbank_set_cpu_jump(int cpu, void *jump_addr); extern void *scu_base_addr; -static inline unsigned int get_auxcr(void) -{ - unsigned int val; - asm("mrc p15, 0, %0, c1, c0, 1 @ get AUXCR" : "=r" (val) : : "cc"); - return val; -} - -static inline void set_auxcr(unsigned int val) -{ - asm volatile("mcr p15, 0, %0, c1, c0, 1 @ set AUXCR" - : : "r" (val) : "cc"); - isb(); -} - static noinline void calxeda_idle_restore(void) { set_cr(get_cr() | CR_C); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 19ceaa60e0f..fe44d3e2c70 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -453,6 +453,12 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) writel_relaxed(1, base + GIC_CPU_CTRL); } +void gic_cpu_if_down(void) +{ + void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]); + writel_relaxed(0, cpu_base + GIC_CPU_CTRL); +} + #ifdef CONFIG_CPU_PM /* * Saves the GIC distributor registers during suspend or idle. Must be called diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index d54e985748b..a5e54f0d6a7 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1144,7 +1144,15 @@ config MCP_UCB1200_TS endmenu config VEXPRESS_CONFIG - bool + bool "ARM Versatile Express platform infrastructure" + depends on ARM || ARM64 help Platform configuration infrastructure for the ARM Ltd. Versatile Express. + +config VEXPRESS_SPC + bool "Versatile Express SPC driver support" + depends on ARM + depends on VEXPRESS_CONFIG + help + Serial Power Controller driver for ARM Ltd. test chips. 
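The big.LITTLE cpuidle driver above only calls mcpm_cpu_suspend(); the actual CPU-interface and power-controller sequencing is left to the platform MCPM back-end, which lives under arch/arm/mach-vexpress in the companion series and is not part of this drivers/ diff. A minimal sketch of that sequence, assuming the helpers added here (gic_cpu_if_down(), the vexpress_spc_* calls, cci_disable_port_by_cpu()) are declared in the headers shown, might look like the following; the function name, the last_man flag and the MPIDR field extraction are illustrative only:

#include <linux/arm-cci.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/vexpress.h>
#include <asm/cputype.h>

/* Hypothetical TC2-style power-down helper, called with IRQs disabled. */
static void example_tc2_cpu_down(bool last_man)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = mpidr & 0xf;
	unsigned int cluster = (mpidr >> 8) & 0xf;

	/* Arm the SPC so an incoming interrupt can wake this CPU back up. */
	vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, true);

	/* Stop signalling interrupts through the GIC CPU interface. */
	gic_cpu_if_down();

	if (last_man) {
		/* Last CPU standing: allow cluster power-down, leave coherency. */
		vexpress_spc_powerdown_enable(cluster, true);
		cci_disable_port_by_cpu(mpidr);
	}
}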
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 718e94a2a9a..3a0120315aa 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -153,5 +153,6 @@ obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o sec-irq.o obj-$(CONFIG_MFD_SYSCON) += syscon.o obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o vexpress-sysreg.o +obj-$(CONFIG_VEXPRESS_SPC) += vexpress-spc.o obj-$(CONFIG_MFD_RETU) += retu-mfd.o obj-$(CONFIG_MFD_AS3711) += as3711.o diff --git a/drivers/mfd/vexpress-config.c b/drivers/mfd/vexpress-config.c index 84ce6b9daa3..1af2b0e0182 100644 --- a/drivers/mfd/vexpress-config.c +++ b/drivers/mfd/vexpress-config.c @@ -86,29 +86,13 @@ void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge) } EXPORT_SYMBOL(vexpress_config_bridge_unregister); - -struct vexpress_config_func { - struct vexpress_config_bridge *bridge; - void *func; -}; - -struct vexpress_config_func *__vexpress_config_func_get(struct device *dev, - struct device_node *node) +static struct vexpress_config_bridge * + vexpress_config_bridge_find(struct device_node *node) { - struct device_node *bridge_node; - struct vexpress_config_func *func; int i; + struct vexpress_config_bridge *res = NULL; + struct device_node *bridge_node = of_node_get(node); - if (WARN_ON(dev && node && dev->of_node != node)) - return NULL; - if (dev && !node) - node = dev->of_node; - - func = kzalloc(sizeof(*func), GFP_KERNEL); - if (!func) - return NULL; - - bridge_node = of_node_get(node); while (bridge_node) { const __be32 *prop = of_get_property(bridge_node, "arm,vexpress,config-bridge", NULL); @@ -129,13 +113,46 @@ struct vexpress_config_func *__vexpress_config_func_get(struct device *dev, if (test_bit(i, vexpress_config_bridges_map) && bridge->node == bridge_node) { - func->bridge = bridge; - func->func = bridge->info->func_get(dev, node); + res = bridge; break; } } mutex_unlock(&vexpress_config_bridges_mutex); + return res; +} + + +struct vexpress_config_func { + struct vexpress_config_bridge *bridge; + void *func; +}; + +struct vexpress_config_func *__vexpress_config_func_get( + struct vexpress_config_bridge *bridge, + struct device *dev, + struct device_node *node, + const char *id) +{ + struct vexpress_config_func *func; + + if (WARN_ON(dev && node && dev->of_node != node)) + return NULL; + if (dev && !node) + node = dev->of_node; + + if (!bridge) + bridge = vexpress_config_bridge_find(node); + if (!bridge) + return NULL; + + func = kzalloc(sizeof(*func), GFP_KERNEL); + if (!func) + return NULL; + + func->bridge = bridge; + func->func = bridge->info->func_get(dev, node, id); + if (!func->func) { of_node_put(node); kfree(func); diff --git a/drivers/mfd/vexpress-spc.c b/drivers/mfd/vexpress-spc.c new file mode 100644 index 00000000000..0c6718abf1b --- /dev/null +++ b/drivers/mfd/vexpress-spc.c @@ -0,0 +1,633 @@ +/* + * Versatile Express Serial Power Controller (SPC) support + * + * Copyright (C) 2013 ARM Ltd. + * + * Authors: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com> + * Achin Gupta <achin.gupta@arm.com> + * Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include <linux/device.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/slab.h> +#include <linux/vexpress.h> + +#include <asm/cacheflush.h> + +#define SCC_CFGREG19 0x120 +#define SCC_CFGREG20 0x124 +#define A15_CONF 0x400 +#define A7_CONF 0x500 +#define SYS_INFO 0x700 +#define PERF_LVL_A15 0xB00 +#define PERF_REQ_A15 0xB04 +#define PERF_LVL_A7 0xB08 +#define PERF_REQ_A7 0xB0c +#define SYS_CFGCTRL 0xB10 +#define SYS_CFGCTRL_REQ 0xB14 +#define PWC_STATUS 0xB18 +#define PWC_FLAG 0xB1c +#define WAKE_INT_MASK 0xB24 +#define WAKE_INT_RAW 0xB28 +#define WAKE_INT_STAT 0xB2c +#define A15_PWRDN_EN 0xB30 +#define A7_PWRDN_EN 0xB34 +#define A7_PWRDNACK 0xB54 +#define A15_BX_ADDR0 0xB68 +#define SYS_CFG_WDATA 0xB70 +#define SYS_CFG_RDATA 0xB74 +#define A7_BX_ADDR0 0xB78 + +#define GBL_WAKEUP_INT_MSK (0x3 << 10) + +#define CLKF_SHIFT 16 +#define CLKF_MASK 0x1FFF +#define CLKR_SHIFT 0 +#define CLKR_MASK 0x3F +#define CLKOD_SHIFT 8 +#define CLKOD_MASK 0xF + +#define OPP_FUNCTION 6 +#define OPP_BASE_DEVICE 0x300 +#define OPP_A15_OFFSET 0x4 +#define OPP_A7_OFFSET 0xc + +#define SYS_CFGCTRL_START (1 << 31) +#define SYS_CFGCTRL_WRITE (1 << 30) +#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20) +#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0) + +#define MAX_OPPS 8 +#define MAX_CLUSTERS 2 + +enum { + A15_OPP_TYPE = 0, + A7_OPP_TYPE = 1, + SYS_CFGCTRL_TYPE = 2, + INVALID_TYPE +}; + +#define STAT_COMPLETE(type) ((1 << 0) << (type << 2)) +#define STAT_ERR(type) ((1 << 1) << (type << 2)) +#define RESPONSE_MASK(type) (STAT_COMPLETE(type) | STAT_ERR(type)) + +struct vexpress_spc_drvdata { + void __iomem *baseaddr; + u32 a15_clusid; + int irq; + u32 cur_req_type; + u32 freqs[MAX_CLUSTERS][MAX_OPPS]; + int freqs_cnt[MAX_CLUSTERS]; +}; + +enum spc_func_type { + CONFIG_FUNC = 0, + PERF_FUNC = 1, +}; + +struct vexpress_spc_func { + enum spc_func_type type; + u32 function; + u32 device; +}; + +static struct vexpress_spc_drvdata *info; +static u32 *vexpress_spc_config_data; +static struct vexpress_config_bridge *vexpress_spc_config_bridge; +static struct vexpress_config_func *opp_func, *perf_func; + +static int vexpress_spc_load_result = -EAGAIN; + +static bool vexpress_spc_initialized(void) +{ + return vexpress_spc_load_result == 0; +} + +/** + * vexpress_spc_write_resume_reg() - set the jump address used for warm boot + * + * @cluster: mpidr[15:8] bitfield describing cluster affinity level + * @cpu: mpidr[7:0] bitfield describing cpu affinity level + * @addr: physical resume address + */ +void vexpress_spc_write_resume_reg(u32 cluster, u32 cpu, u32 addr) +{ + void __iomem *baseaddr; + + if (WARN_ON_ONCE(cluster >= MAX_CLUSTERS)) + return; + + if (cluster != info->a15_clusid) + baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2); + else + baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2); + + writel_relaxed(addr, baseaddr); +} + +/** + * vexpress_spc_get_nb_cpus() - get number of cpus in a cluster + * + * @cluster: mpidr[15:8] bitfield describing cluster affinity level + * + * Return: number of cpus in the cluster + * -EINVAL if cluster number invalid + */ +int vexpress_spc_get_nb_cpus(u32 cluster) +{ + u32 val; + + if (WARN_ON_ONCE(cluster >= MAX_CLUSTERS)) + return -EINVAL; + + val = readl_relaxed(info->baseaddr + SYS_INFO); + val = (cluster != info->a15_clusid) ? 
(val >> 20) : (val >> 16); + return val & 0xf; +} +EXPORT_SYMBOL_GPL(vexpress_spc_get_nb_cpus); + +/** + * vexpress_spc_get_performance - get current performance level of cluster + * @cluster: mpidr[15:8] bitfield describing cluster affinity level + * @freq: pointer to the performance level to be assigned + * + * Return: 0 on success + * < 0 on read error + */ +int vexpress_spc_get_performance(u32 cluster, u32 *freq) +{ + u32 perf_cfg_reg; + int perf, ret; + + if (!vexpress_spc_initialized() || (cluster >= MAX_CLUSTERS)) + return -EINVAL; + + perf_cfg_reg = cluster != info->a15_clusid ? PERF_LVL_A7 : PERF_LVL_A15; + ret = vexpress_config_read(perf_func, perf_cfg_reg, &perf); + + if (!ret) + *freq = info->freqs[cluster][perf]; + + return ret; +} +EXPORT_SYMBOL_GPL(vexpress_spc_get_performance); + +/** + * vexpress_spc_get_perf_index - get performance level corresponding to + * a frequency + * @cluster: mpidr[15:8] bitfield describing cluster affinity level + * @freq: frequency to be looked-up + * + * Return: perf level index on success + * -EINVAL on error + */ +static int vexpress_spc_find_perf_index(u32 cluster, u32 freq) +{ + int idx; + + for (idx = 0; idx < info->freqs_cnt[cluster]; idx++) + if (info->freqs[cluster][idx] == freq) + break; + return (idx == info->freqs_cnt[cluster]) ? -EINVAL : idx; +} + +/** + * vexpress_spc_set_performance - set current performance level of cluster + * + * @cluster: mpidr[15:8] bitfield describing cluster affinity level + * @freq: performance level to be programmed + * + * Returns: 0 on success + * < 0 on write error + */ +int vexpress_spc_set_performance(u32 cluster, u32 freq) +{ + int ret, perf, offset; + + if (!vexpress_spc_initialized() || (cluster >= MAX_CLUSTERS)) + return -EINVAL; + + offset = (cluster != info->a15_clusid) ? PERF_LVL_A7 : PERF_LVL_A15; + + perf = vexpress_spc_find_perf_index(cluster, freq); + + if (perf < 0 || perf >= MAX_OPPS) + return -EINVAL; + + ret = vexpress_config_write(perf_func, offset, perf); + + return ret; +} +EXPORT_SYMBOL_GPL(vexpress_spc_set_performance); + +static void vexpress_spc_set_wake_intr(u32 mask) +{ + writel_relaxed(mask & VEXPRESS_SPC_WAKE_INTR_MASK, + info->baseaddr + WAKE_INT_MASK); +} + +static inline void reg_bitmask(u32 *reg, u32 mask, bool set) +{ + if (set) + *reg |= mask; + else + *reg &= ~mask; +} + +/** + * vexpress_spc_set_global_wakeup_intr() + * + * Function to set/clear global wakeup IRQs. Not protected by locking since + * it might be used in code paths where normal cacheable locks are not + * working. Locking must be provided by the caller to ensure atomicity. + * + * @set: if true, global wake-up IRQs are set, if false they are cleared + */ +void vexpress_spc_set_global_wakeup_intr(bool set) +{ + u32 wake_int_mask_reg = 0; + + wake_int_mask_reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK); + reg_bitmask(&wake_int_mask_reg, GBL_WAKEUP_INT_MSK, set); + vexpress_spc_set_wake_intr(wake_int_mask_reg); +} + +/** + * vexpress_spc_set_cpu_wakeup_irq() + * + * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since + * it might be used in code paths where normal cacheable locks are not + * working. Locking must be provided by the caller to ensure atomicity. 
+ * + * @cpu: mpidr[7:0] bitfield describing cpu affinity level + * @cluster: mpidr[15:8] bitfield describing cluster affinity level + * @set: if true, wake-up IRQs are set, if false they are cleared + */ +void vexpress_spc_set_cpu_wakeup_irq(u32 cpu, u32 cluster, bool set) +{ + u32 mask = 0; + u32 wake_int_mask_reg = 0; + + mask = 1 << cpu; + if (info->a15_clusid != cluster) + mask <<= 4; + + wake_int_mask_reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK); + reg_bitmask(&wake_int_mask_reg, mask, set); + vexpress_spc_set_wake_intr(wake_int_mask_reg); +} + +/** + * vexpress_spc_powerdown_enable() + * + * Function to enable/disable cluster powerdown. Not protected by locking + * since it might be used in code paths where normal cacheable locks are not + * working. Locking must be provided by the caller to ensure atomicity. + * + * @cluster: mpidr[15:8] bitfield describing cluster affinity level + * @enable: if true enables powerdown, if false disables it + */ +void vexpress_spc_powerdown_enable(u32 cluster, bool enable) +{ + u32 pwdrn_reg = 0; + + if (cluster >= MAX_CLUSTERS) + return; + pwdrn_reg = cluster != info->a15_clusid ? A7_PWRDN_EN : A15_PWRDN_EN; + writel_relaxed(enable, info->baseaddr + pwdrn_reg); +} + +irqreturn_t vexpress_spc_irq_handler(int irq, void *data) +{ + int ret; + u32 status = readl_relaxed(info->baseaddr + PWC_STATUS); + + if (!(status & RESPONSE_MASK(info->cur_req_type))) + return IRQ_NONE; + + if ((status == STAT_COMPLETE(SYS_CFGCTRL_TYPE)) + && vexpress_spc_config_data) { + *vexpress_spc_config_data = + readl_relaxed(info->baseaddr + SYS_CFG_RDATA); + vexpress_spc_config_data = NULL; + } + + ret = STAT_COMPLETE(info->cur_req_type) ? 0 : -EIO; + info->cur_req_type = INVALID_TYPE; + vexpress_config_complete(vexpress_spc_config_bridge, ret); + return IRQ_HANDLED; +} + +/** + * Based on the firmware documentation, this is always fixed to 20 + * All the 4 OSC: A15 PLL0/1, A7 PLL0/1 must be programmed same + * values for both control and value registers. + * This function uses A15 PLL 0 registers to compute multiple factor + * F out = F in * (CLKF + 1) / ((CLKOD + 1) * (CLKR + 1)) + */ +static inline int __get_mult_factor(void) +{ + int i_div, o_div, f_div; + u32 tmp; + + tmp = readl(info->baseaddr + SCC_CFGREG19); + f_div = (tmp >> CLKF_SHIFT) & CLKF_MASK; + + tmp = readl(info->baseaddr + SCC_CFGREG20); + o_div = (tmp >> CLKOD_SHIFT) & CLKOD_MASK; + i_div = (tmp >> CLKR_SHIFT) & CLKR_MASK; + + return (f_div + 1) / ((o_div + 1) * (i_div + 1)); +} + +/** + * vexpress_spc_populate_opps() - initialize opp tables from microcontroller + * + * @cluster: mpidr[15:8] bitfield describing cluster affinity level + * + * Return: 0 on success + * < 0 on error + */ +static int vexpress_spc_populate_opps(u32 cluster) +{ + u32 data = 0, ret, i, offset; + int mult_fact = __get_mult_factor(); + + if (WARN_ON_ONCE(cluster >= MAX_CLUSTERS)) + return -EINVAL; + + offset = cluster != info->a15_clusid ? 
OPP_A7_OFFSET : OPP_A15_OFFSET; + for (i = 0; i < MAX_OPPS; i++) { + ret = vexpress_config_read(opp_func, i + offset, &data); + if (!ret) + info->freqs[cluster][i] = (data & 0xFFFFF) * mult_fact; + else + break; + } + + info->freqs_cnt[cluster] = i; + return ret; +} + +/** + * vexpress_spc_get_freq_table() - Retrieve a pointer to the frequency + * table for a given cluster + * + * @cluster: mpidr[15:8] bitfield describing cluster affinity level + * @fptr: pointer to be initialized + * Return: operating points count on success + * -EINVAL on pointer error + */ +int vexpress_spc_get_freq_table(u32 cluster, u32 **fptr) +{ + if (WARN_ON_ONCE(!fptr || cluster >= MAX_CLUSTERS)) + return -EINVAL; + *fptr = info->freqs[cluster]; + return info->freqs_cnt[cluster]; +} +EXPORT_SYMBOL_GPL(vexpress_spc_get_freq_table); + +static void *vexpress_spc_func_get(struct device *dev, + struct device_node *node, const char *id) +{ + struct vexpress_spc_func *spc_func; + u32 func_device[2]; + int err = 0; + + spc_func = kzalloc(sizeof(*spc_func), GFP_KERNEL); + if (!spc_func) + return NULL; + + if (strcmp(id, "opp") == 0) { + spc_func->type = CONFIG_FUNC; + spc_func->function = OPP_FUNCTION; + spc_func->device = OPP_BASE_DEVICE; + } else if (strcmp(id, "perf") == 0) { + spc_func->type = PERF_FUNC; + } else if (node) { + of_node_get(node); + err = of_property_read_u32_array(node, + "arm,vexpress-sysreg,func", func_device, + ARRAY_SIZE(func_device)); + of_node_put(node); + spc_func->type = CONFIG_FUNC; + spc_func->function = func_device[0]; + spc_func->device = func_device[1]; + } + + if (WARN_ON(err)) { + kfree(spc_func); + return NULL; + } + + pr_debug("func 0x%p = 0x%x, %d %d\n", spc_func, + spc_func->function, + spc_func->device, + spc_func->type); + + return spc_func; +} + +static void vexpress_spc_func_put(void *func) +{ + kfree(func); +} + +static int vexpress_spc_func_exec(void *func, int offset, bool write, + u32 *data) +{ + struct vexpress_spc_func *spc_func = func; + u32 command; + + if (!data) + return -EINVAL; + /* + * Setting and retrieval of operating points is not part of + * DCC config interface. It was made to go through the same + * code path so that requests to the M3 can be serialized + * properly with config reads/writes through the common + * vexpress config interface + */ + switch (spc_func->type) { + case PERF_FUNC: + if (write) { + info->cur_req_type = (offset == PERF_LVL_A15) ? + A15_OPP_TYPE : A7_OPP_TYPE; + writel_relaxed(*data, info->baseaddr + offset); + return VEXPRESS_CONFIG_STATUS_WAIT; + } else { + *data = readl_relaxed(info->baseaddr + offset); + return VEXPRESS_CONFIG_STATUS_DONE; + } + case CONFIG_FUNC: + info->cur_req_type = SYS_CFGCTRL_TYPE; + + command = SYS_CFGCTRL_START; + command |= write ? 
SYS_CFGCTRL_WRITE : 0; + command |= SYS_CFGCTRL_FUNC(spc_func->function); + command |= SYS_CFGCTRL_DEVICE(spc_func->device + offset); + + pr_debug("command %x\n", command); + + if (!write) + vexpress_spc_config_data = data; + else + writel_relaxed(*data, info->baseaddr + SYS_CFG_WDATA); + writel_relaxed(command, info->baseaddr + SYS_CFGCTRL); + + return VEXPRESS_CONFIG_STATUS_WAIT; + default: + return -EINVAL; + } +} + +struct vexpress_config_bridge_info vexpress_spc_config_bridge_info = { + .name = "vexpress-spc", + .func_get = vexpress_spc_func_get, + .func_put = vexpress_spc_func_put, + .func_exec = vexpress_spc_func_exec, +}; + +static const struct of_device_id vexpress_spc_ids[] __initconst = { + { .compatible = "arm,vexpress-spc,v2p-ca15_a7" }, + { .compatible = "arm,vexpress-spc" }, + {}, +}; + +static int __init vexpress_spc_init(void) +{ + int ret; + struct device_node *node = of_find_matching_node(NULL, + vexpress_spc_ids); + + if (!node) + return -ENODEV; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + pr_err("%s: unable to allocate mem\n", __func__); + return -ENOMEM; + } + info->cur_req_type = INVALID_TYPE; + + info->baseaddr = of_iomap(node, 0); + if (WARN_ON(!info->baseaddr)) { + ret = -ENXIO; + goto mem_free; + } + + info->irq = irq_of_parse_and_map(node, 0); + + if (WARN_ON(!info->irq)) { + ret = -ENXIO; + goto unmap; + } + + readl_relaxed(info->baseaddr + PWC_STATUS); + + ret = request_irq(info->irq, vexpress_spc_irq_handler, + IRQF_DISABLED | IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + "arm-spc", info); + + if (ret) { + pr_err("IRQ %d request failed\n", info->irq); + ret = -ENODEV; + goto unmap; + } + + info->a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf; + + vexpress_spc_config_bridge = vexpress_config_bridge_register( + node, &vexpress_spc_config_bridge_info); + + if (WARN_ON(!vexpress_spc_config_bridge)) { + ret = -ENODEV; + goto unmap; + } + + opp_func = vexpress_config_func_get(vexpress_spc_config_bridge, "opp"); + perf_func = + vexpress_config_func_get(vexpress_spc_config_bridge, "perf"); + + if (!opp_func || !perf_func) { + ret = -ENODEV; + goto unmap; + } + + if (vexpress_spc_populate_opps(0) || vexpress_spc_populate_opps(1)) { + if (info->irq) + free_irq(info->irq, info); + pr_err("failed to build OPP table\n"); + ret = -ENODEV; + goto unmap; + } + /* + * Multi-cluster systems may need this data when non-coherent, during + * cluster power-up/power-down. Make sure it reaches main memory: + */ + sync_cache_w(info); + sync_cache_w(&info); + pr_info("vexpress-spc loaded at %p\n", info->baseaddr); + return 0; + +unmap: + iounmap(info->baseaddr); + +mem_free: + kfree(info); + return ret; +} + +static bool __init __vexpress_spc_check_loaded(void); +/* + * Pointer spc_check_loaded is swapped after init hence it is safe + * to initialize it to a function in the __init section + */ +static bool (*spc_check_loaded)(void) __refdata = &__vexpress_spc_check_loaded; + +static bool __init __vexpress_spc_check_loaded(void) +{ + if (vexpress_spc_load_result == -EAGAIN) + vexpress_spc_load_result = vexpress_spc_init(); + spc_check_loaded = &vexpress_spc_initialized; + return vexpress_spc_initialized(); +} + +/* + * Function exported to manage early_initcall ordering. + * SPC code is needed very early in the boot process + * to bring CPUs out of reset and initialize power + * management back-end. 
After boot swap pointers to + * make the functionality check available to loadable + * modules, when early boot init functions have been + * already freed from kernel address space. + */ +bool vexpress_spc_check_loaded(void) +{ + return spc_check_loaded(); +} +EXPORT_SYMBOL_GPL(vexpress_spc_check_loaded); + +static int __init vexpress_spc_early_init(void) +{ + __vexpress_spc_check_loaded(); + return vexpress_spc_load_result; +} +early_initcall(vexpress_spc_early_init); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Serial Power Controller (SPC) support"); diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c index 96a020b1dcd..7f429afce11 100644 --- a/drivers/mfd/vexpress-sysreg.c +++ b/drivers/mfd/vexpress-sysreg.c @@ -165,7 +165,7 @@ static u32 *vexpress_sysreg_config_data; static int vexpress_sysreg_config_tries; static void *vexpress_sysreg_config_func_get(struct device *dev, - struct device_node *node) + struct device_node *node, const char *id) { struct vexpress_sysreg_config_func *config_func; u32 site; @@ -351,6 +351,8 @@ void __init vexpress_sysreg_of_early_init(void) } +#ifdef CONFIG_GPIOLIB + #define VEXPRESS_SYSREG_GPIO(_name, _reg, _value) \ [VEXPRESS_GPIO_##_name] = { \ .reg = _reg, \ @@ -445,6 +447,8 @@ struct gpio_led_platform_data vexpress_sysreg_leds_pdata = { .leds = vexpress_sysreg_leds, }; +#endif + static ssize_t vexpress_sysreg_sys_id_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -480,6 +484,9 @@ static int vexpress_sysreg_probe(struct platform_device *pdev) setup_timer(&vexpress_sysreg_config_timer, vexpress_sysreg_config_complete, 0); + vexpress_sysreg_dev = &pdev->dev; + +#ifdef CONFIG_GPIOLIB vexpress_sysreg_gpio_chip.dev = &pdev->dev; err = gpiochip_add(&vexpress_sysreg_gpio_chip); if (err) { @@ -490,11 +497,10 @@ static int vexpress_sysreg_probe(struct platform_device *pdev) return err; } - vexpress_sysreg_dev = &pdev->dev; - platform_device_register_data(vexpress_sysreg_dev, "leds-gpio", PLATFORM_DEVID_AUTO, &vexpress_sysreg_leds_pdata, sizeof(vexpress_sysreg_leds_pdata)); +#endif device_create_file(vexpress_sysreg_dev, &dev_attr_sys_id); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index dfbf978315d..bdd703c6bf1 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -1896,7 +1896,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr, SMC_SELECT_BANK(lp, 1); val = SMC_GET_BASE(lp); val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT; - if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) { + if (((unsigned long)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) { printk("%s: IOADDR %p doesn't match configuration (%x).\n", CARDNAME, ioaddr, val); } diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 349e9ae8090..ee039dcead0 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -32,7 +32,8 @@ config POWER_RESET_RESTART user presses a key. u-boot then boots into Linux. config POWER_RESET_VEXPRESS - bool + bool "ARM Versatile Express power-off and reset driver" + depends on ARM || ARM64 depends on POWER_RESET help Power off and reset support for the ARM Ltd. 
Versatile diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 2e937bdace6..d0d9c5ea9e3 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -39,6 +39,11 @@ config VIDEOMODE_HELPERS config HDMI bool +config VEXPRESS_DVI_CONTROL + bool "Versatile Express DVI control" + depends on FB && VEXPRESS_CONFIG + default y + menuconfig FB tristate "Support for frame buffer devices" ---help--- @@ -326,6 +331,21 @@ config FB_ARMCLCD here and read <file:Documentation/kbuild/modules.txt>. The module will be called amba-clcd. +config FB_ARMHDLCD + tristate "ARM High Definition LCD support" + depends on FB && ARM + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + help + This framebuffer device driver is for the ARM High Definition + Colour LCD controller. + + If you want to compile this as a module (=code which can be + inserted into and removed from the running kernel), say M + here and read <file:Documentation/kbuild/modules.txt>. The module + will be called arm-hdlcd. + config FB_ACORN bool "Acorn VIDC support" depends on (FB = y) && ARM && ARCH_ACORN diff --git a/drivers/video/Makefile b/drivers/video/Makefile index e8bae8dd480..33869eea498 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -99,6 +99,7 @@ obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o obj-$(CONFIG_FB_PVR2) += pvr2fb.o obj-$(CONFIG_FB_VOODOO1) += sstfb.o obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o +obj-$(CONFIG_FB_ARMHDLCD) += arm-hdlcd.o obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o obj-$(CONFIG_FB_68328) += 68328fb.o obj-$(CONFIG_FB_GBE) += gbefb.o @@ -177,3 +178,6 @@ obj-$(CONFIG_VIDEOMODE_HELPERS) += display_timing.o videomode.o ifeq ($(CONFIG_OF),y) obj-$(CONFIG_VIDEOMODE_HELPERS) += of_display_timing.o of_videomode.o endif + +# platform specific output drivers +obj-$(CONFIG_VEXPRESS_DVI_CONTROL) += vexpress-dvi.o diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c index 0a2cce7285b..94a1998338d 100644 --- a/drivers/video/amba-clcd.c +++ b/drivers/video/amba-clcd.c @@ -16,7 +16,10 @@ #include <linux/string.h> #include <linux/slab.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/memblock.h> #include <linux/mm.h> +#include <linux/of.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/ioport.h> @@ -30,6 +33,16 @@ #define to_clcd(info) container_of(info, struct clcd_fb, fb) +#ifdef CONFIG_ARM +#define clcdfb_dma_alloc dma_alloc_writecombine +#define clcdfb_dma_free dma_free_writecombine +#define clcdfb_dma_mmap dma_mmap_writecombine +#else +#define clcdfb_dma_alloc dma_alloc_coherent +#define clcdfb_dma_free dma_free_coherent +#define clcdfb_dma_mmap dma_mmap_coherent +#endif + /* This is limited to 16 characters when displayed by X startup */ static const char *clcd_name = "CLCD FB"; @@ -392,6 +405,44 @@ static int clcdfb_blank(int blank_mode, struct fb_info *info) return 0; } +int clcdfb_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vma) +{ + return clcdfb_dma_mmap(&fb->dev->dev, vma, + fb->fb.screen_base, + fb->fb.fix.smem_start, + fb->fb.fix.smem_len); +} + +int clcdfb_mmap_io(struct clcd_fb *fb, struct vm_area_struct *vma) +{ + unsigned long user_count, count, pfn, off; + + user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + count = PAGE_ALIGN(fb->fb.fix.smem_len) >> PAGE_SHIFT; + pfn = fb->fb.fix.smem_start >> PAGE_SHIFT; + off = vma->vm_pgoff; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if (off < count && user_count <= (count - off)) + return remap_pfn_range(vma, vma->vm_start, pfn + 
off, + user_count << PAGE_SHIFT, + vma->vm_page_prot); + + return -ENXIO; +} + +void clcdfb_remove_dma(struct clcd_fb *fb) +{ + clcdfb_dma_free(&fb->dev->dev, fb->fb.fix.smem_len, + fb->fb.screen_base, fb->fb.fix.smem_start); +} + +void clcdfb_remove_io(struct clcd_fb *fb) +{ + iounmap(fb->fb.screen_base); +} + static int clcdfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { @@ -542,14 +593,239 @@ static int clcdfb_register(struct clcd_fb *fb) return ret; } +struct string_lookup { + const char *string; + const u32 val; +}; + +static struct string_lookup vmode_lookups[] = { + { "FB_VMODE_NONINTERLACED", FB_VMODE_NONINTERLACED}, + { "FB_VMODE_INTERLACED", FB_VMODE_INTERLACED}, + { "FB_VMODE_DOUBLE", FB_VMODE_DOUBLE}, + { "FB_VMODE_ODD_FLD_FIRST", FB_VMODE_ODD_FLD_FIRST}, + { NULL, 0 }, +}; + +static struct string_lookup tim2_lookups[] = { + { "TIM2_CLKSEL", TIM2_CLKSEL}, + { "TIM2_IVS", TIM2_IVS}, + { "TIM2_IHS", TIM2_IHS}, + { "TIM2_IPC", TIM2_IPC}, + { "TIM2_IOE", TIM2_IOE}, + { "TIM2_BCD", TIM2_BCD}, + { NULL, 0}, +}; +static struct string_lookup cntl_lookups[] = { + {"CNTL_LCDEN", CNTL_LCDEN}, + {"CNTL_LCDBPP1", CNTL_LCDBPP1}, + {"CNTL_LCDBPP2", CNTL_LCDBPP2}, + {"CNTL_LCDBPP4", CNTL_LCDBPP4}, + {"CNTL_LCDBPP8", CNTL_LCDBPP8}, + {"CNTL_LCDBPP16", CNTL_LCDBPP16}, + {"CNTL_LCDBPP16_565", CNTL_LCDBPP16_565}, + {"CNTL_LCDBPP16_444", CNTL_LCDBPP16_444}, + {"CNTL_LCDBPP24", CNTL_LCDBPP24}, + {"CNTL_LCDBW", CNTL_LCDBW}, + {"CNTL_LCDTFT", CNTL_LCDTFT}, + {"CNTL_LCDMONO8", CNTL_LCDMONO8}, + {"CNTL_LCDDUAL", CNTL_LCDDUAL}, + {"CNTL_BGR", CNTL_BGR}, + {"CNTL_BEBO", CNTL_BEBO}, + {"CNTL_BEPO", CNTL_BEPO}, + {"CNTL_LCDPWR", CNTL_LCDPWR}, + {"CNTL_LCDVCOMP(1)", CNTL_LCDVCOMP(1)}, + {"CNTL_LCDVCOMP(2)", CNTL_LCDVCOMP(2)}, + {"CNTL_LCDVCOMP(3)", CNTL_LCDVCOMP(3)}, + {"CNTL_LCDVCOMP(4)", CNTL_LCDVCOMP(4)}, + {"CNTL_LCDVCOMP(5)", CNTL_LCDVCOMP(5)}, + {"CNTL_LCDVCOMP(6)", CNTL_LCDVCOMP(6)}, + {"CNTL_LCDVCOMP(7)", CNTL_LCDVCOMP(7)}, + {"CNTL_LDMAFIFOTIME", CNTL_LDMAFIFOTIME}, + {"CNTL_WATERMARK", CNTL_WATERMARK}, + { NULL, 0}, +}; +static struct string_lookup caps_lookups[] = { + {"CLCD_CAP_RGB444", CLCD_CAP_RGB444}, + {"CLCD_CAP_RGB5551", CLCD_CAP_RGB5551}, + {"CLCD_CAP_RGB565", CLCD_CAP_RGB565}, + {"CLCD_CAP_RGB888", CLCD_CAP_RGB888}, + {"CLCD_CAP_BGR444", CLCD_CAP_BGR444}, + {"CLCD_CAP_BGR5551", CLCD_CAP_BGR5551}, + {"CLCD_CAP_BGR565", CLCD_CAP_BGR565}, + {"CLCD_CAP_BGR888", CLCD_CAP_BGR888}, + {"CLCD_CAP_444", CLCD_CAP_444}, + {"CLCD_CAP_5551", CLCD_CAP_5551}, + {"CLCD_CAP_565", CLCD_CAP_565}, + {"CLCD_CAP_888", CLCD_CAP_888}, + {"CLCD_CAP_RGB", CLCD_CAP_RGB}, + {"CLCD_CAP_BGR", CLCD_CAP_BGR}, + {"CLCD_CAP_ALL", CLCD_CAP_ALL}, + { NULL, 0}, +}; + +u32 parse_setting(struct string_lookup *lookup, const char *name) +{ + int i = 0; + while (lookup[i].string != NULL) { + if (strcmp(lookup[i].string, name) == 0) + return lookup[i].val; + ++i; + } + return -EINVAL; +} + +u32 get_string_lookup(struct device_node *node, const char *name, + struct string_lookup *lookup) +{ + const char *string; + int count, i, ret = 0; + + count = of_property_count_strings(node, name); + if (count >= 0) + for (i = 0; i < count; i++) + if (of_property_read_string_index(node, name, i, + &string) == 0) + ret |= parse_setting(lookup, string); + return ret; +} + +int get_val(struct device_node *node, const char *string) +{ + u32 ret = 0; + + if (of_property_read_u32(node, string, &ret)) + ret = -1; + return ret; +} + +struct clcd_panel *getPanel(struct device_node *node) +{ + static struct clcd_panel panel; + + 
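+	/*
+	 * Each timing and geometry field is read from a device tree
+	 * property of the same name via get_val(); a missing property
+	 * leaves the field at -1. Note the panel structure is static,
+	 * so a later lookup overwrites the result of an earlier one.
+	 */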
panel.mode.refresh = get_val(node, "refresh"); + panel.mode.xres = get_val(node, "xres"); + panel.mode.yres = get_val(node, "yres"); + panel.mode.pixclock = get_val(node, "pixclock"); + panel.mode.left_margin = get_val(node, "left_margin"); + panel.mode.right_margin = get_val(node, "right_margin"); + panel.mode.upper_margin = get_val(node, "upper_margin"); + panel.mode.lower_margin = get_val(node, "lower_margin"); + panel.mode.hsync_len = get_val(node, "hsync_len"); + panel.mode.vsync_len = get_val(node, "vsync_len"); + panel.mode.sync = get_val(node, "sync"); + panel.bpp = get_val(node, "bpp"); + panel.width = (signed short) get_val(node, "width"); + panel.height = (signed short) get_val(node, "height"); + + panel.mode.vmode = get_string_lookup(node, "vmode", vmode_lookups); + panel.tim2 = get_string_lookup(node, "tim2", tim2_lookups); + panel.cntl = get_string_lookup(node, "cntl", cntl_lookups); + panel.caps = get_string_lookup(node, "caps", caps_lookups); + + return &panel; +} + +struct clcd_panel *clcdfb_get_panel(const char *name) +{ + struct device_node *node = NULL; + const char *mode; + struct clcd_panel *panel = NULL; + + do { + node = of_find_compatible_node(node, NULL, "panel"); + if (node) + if (of_property_read_string(node, "mode", &mode) == 0) + if (strcmp(mode, name) == 0) { + panel = getPanel(node); + panel->mode.name = name; + } + } while (node != NULL); + + return panel; +} + +#ifdef CONFIG_OF +static int clcdfb_dt_init(struct clcd_fb *fb) +{ + int err = 0; + struct device_node *node; + const char *mode; + dma_addr_t dma; + u32 use_dma; + const __be32 *prop; + int len, na, ns; + phys_addr_t fb_base, fb_size; + + node = fb->dev->dev.of_node; + if (!node) + return -ENODEV; + + na = of_n_addr_cells(node); + ns = of_n_size_cells(node); + + if (WARN_ON(of_property_read_string(node, "mode", &mode))) + return -ENODEV; + + fb->panel = clcdfb_get_panel(mode); + if (!fb->panel) + return -EINVAL; + fb->fb.fix.smem_len = fb->panel->mode.xres * fb->panel->mode.yres * 2; + + fb->board->name = "Device Tree CLCD PL111"; + fb->board->caps = CLCD_CAP_5551 | CLCD_CAP_565; + fb->board->check = clcdfb_check; + fb->board->decode = clcdfb_decode; + + if (of_property_read_u32(node, "use_dma", &use_dma)) + use_dma = 0; + + if (use_dma) { + fb->fb.screen_base = clcdfb_dma_alloc(&fb->dev->dev, + fb->fb.fix.smem_len, + &dma, GFP_KERNEL); + if (!fb->fb.screen_base) { + pr_err("CLCD: unable to map framebuffer\n"); + return -ENOMEM; + } + + fb->fb.fix.smem_start = dma; + fb->board->mmap = clcdfb_mmap_dma; + fb->board->remove = clcdfb_remove_dma; + } else { + prop = of_get_property(node, "framebuffer", &len); + if (WARN_ON(!prop || len < (na + ns) * sizeof(*prop))) + return -EINVAL; + + fb_base = of_read_number(prop, na); + fb_size = of_read_number(prop + na, ns); + + fb->fb.fix.smem_start = fb_base; + fb->fb.screen_base = ioremap_wc(fb_base, fb_size); + fb->board->mmap = clcdfb_mmap_io; + fb->board->remove = clcdfb_remove_io; + } + + return err; +} +#endif /* CONFIG_OF */ + static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id) { struct clcd_board *board = dev->dev.platform_data; struct clcd_fb *fb; int ret; - if (!board) - return -EINVAL; + if (!board) { +#ifdef CONFIG_OF + if (dev->dev.of_node) { + board = kzalloc(sizeof(struct clcd_board), GFP_KERNEL); + if (!board) + return -ENOMEM; + board->setup = clcdfb_dt_init; + } else +#endif + return -EINVAL; + } ret = amba_request_regions(dev, NULL); if (ret) { diff --git a/drivers/video/arm-hdlcd.c b/drivers/video/arm-hdlcd.c new file 
mode 100644 index 00000000000..cfd631e3dc5 --- /dev/null +++ b/drivers/video/arm-hdlcd.c @@ -0,0 +1,844 @@ +/* + * drivers/video/arm-hdlcd.c + * + * Copyright (C) 2011 ARM Limited + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + * + * ARM HDLCD Controller + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/ctype.h> +#include <linux/mm.h> +#include <linux/delay.h> +#include <linux/of.h> +#include <linux/fb.h> +#include <linux/clk.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/memblock.h> +#include <linux/arm-hdlcd.h> +#ifdef HDLCD_COUNT_BUFFERUNDERRUNS +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#endif + +#include "edid.h" + +#ifdef CONFIG_SERIAL_AMBA_PCU_UART +int get_edid(u8 *msgbuf); +#else +#endif + +#define to_hdlcd_device(info) container_of(info, struct hdlcd_device, fb) + +static struct of_device_id hdlcd_of_matches[] = { + { .compatible = "arm,hdlcd" }, + {}, +}; + +/* Framebuffer size. */ +static unsigned long framebuffer_size; + +#ifdef HDLCD_COUNT_BUFFERUNDERRUNS +static unsigned long buffer_underrun_events; +static DEFINE_SPINLOCK(hdlcd_underrun_lock); + +static void hdlcd_underrun_set(unsigned long val) +{ + spin_lock(&hdlcd_underrun_lock); + buffer_underrun_events = val; + spin_unlock(&hdlcd_underrun_lock); +} + +static unsigned long hdlcd_underrun_get(void) +{ + unsigned long val; + spin_lock(&hdlcd_underrun_lock); + val = buffer_underrun_events; + spin_unlock(&hdlcd_underrun_lock); + return val; +} + +#ifdef CONFIG_PROC_FS +static int hdlcd_underrun_show(struct seq_file *m, void *v) +{ + unsigned char underrun_string[32]; + snprintf(underrun_string, 32, "%lu\n", hdlcd_underrun_get()); + seq_puts(m, underrun_string); + return 0; +} + +static int proc_hdlcd_underrun_open(struct inode *inode, struct file *file) +{ + return single_open(file, hdlcd_underrun_show, NULL); +} + +static const struct file_operations proc_hdlcd_underrun_operations = { + .open = proc_hdlcd_underrun_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int hdlcd_underrun_init(void) +{ + hdlcd_underrun_set(0); + proc_create("hdlcd_underrun", 0, NULL, &proc_hdlcd_underrun_operations); + return 0; +} +static void hdlcd_underrun_close(void) +{ + remove_proc_entry("hdlcd_underrun", NULL); +} +#else +static int hdlcd_underrun_init(void) { return 0; } +static void hdlcd_underrun_close(void) { } +#endif +#endif + +static char *fb_mode = "1680x1050-32@60\0\0\0\0\0"; + +static struct fb_var_screeninfo cached_var_screeninfo; + +static struct fb_videomode hdlcd_default_mode = { + .refresh = 60, + .xres = 1680, + .yres = 1050, + .pixclock = 8403, + .left_margin = 80, + .right_margin = 48, + .upper_margin = 21, + .lower_margin = 3, + .hsync_len = 32, + .vsync_len = 6, + .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + .vmode = FB_VMODE_NONINTERLACED +}; + +static inline void hdlcd_enable(struct hdlcd_device *hdlcd) +{ + dev_dbg(hdlcd->dev, "HDLCD: output enabled\n"); + writel(1, hdlcd->base + HDLCD_REG_COMMAND); +} + +static inline void hdlcd_disable(struct hdlcd_device *hdlcd) +{ + dev_dbg(hdlcd->dev, "HDLCD: output disabled\n"); + writel(0, hdlcd->base + HDLCD_REG_COMMAND); +} + +static int hdlcd_set_bitfields(struct hdlcd_device 
*hdlcd, + struct fb_var_screeninfo *var) +{ + int ret = 0; + + memset(&var->transp, 0, sizeof(var->transp)); + var->red.msb_right = 0; + var->green.msb_right = 0; + var->blue.msb_right = 0; + var->blue.offset = 0; + + switch (var->bits_per_pixel) { + case 8: + /* pseudocolor */ + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + break; + case 16: + /* 565 format */ + var->red.length = 5; + var->green.length = 6; + var->blue.length = 5; + break; + case 32: + var->transp.length = 8; + case 24: + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + break; + default: + ret = -EINVAL; + break; + } + + if (!ret) { + if(var->bits_per_pixel != 32) + { + var->green.offset = var->blue.length; + var->red.offset = var->green.offset + var->green.length; + } + else + { + /* Previously, the byte ordering for 32-bit color was + * (msb)<alpha><red><green><blue>(lsb) + * but this does not match what android expects and + * the colors are odd. Instead, use + * <alpha><blue><green><red> + * Since we tell fb what we are doing, console + * , X and directfb access should work fine. + */ + var->green.offset = var->red.length; + var->blue.offset = var->green.offset + var->green.length; + var->transp.offset = var->blue.offset + var->blue.length; + } + } + + return ret; +} + +static int hdlcd_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +{ + struct hdlcd_device *hdlcd = to_hdlcd_device(info); + int bytes_per_pixel = var->bits_per_pixel / 8; + +#ifdef HDLCD_NO_VIRTUAL_SCREEN + var->yres_virtual = var->yres; +#else + var->yres_virtual = 2 * var->yres; +#endif + + if ((var->xres_virtual * bytes_per_pixel * var->yres_virtual) > hdlcd->fb.fix.smem_len) + return -ENOMEM; + + if (var->xres > HDLCD_MAX_XRES || var->yres > HDLCD_MAX_YRES) + return -EINVAL; + + /* make sure the bitfields are set appropriately */ + return hdlcd_set_bitfields(hdlcd, var); +} + +/* prototype */ +static int hdlcd_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info); + +#define WRITE_HDLCD_REG(reg, value) writel((value), hdlcd->base + (reg)) +#define READ_HDLCD_REG(reg) readl(hdlcd->base + (reg)) + +static int hdlcd_set_par(struct fb_info *info) +{ + struct hdlcd_device *hdlcd = to_hdlcd_device(info); + int bytes_per_pixel = hdlcd->fb.var.bits_per_pixel / 8; + int polarities; + int old_yoffset; + + /* check for shortcuts */ + old_yoffset = cached_var_screeninfo.yoffset; + cached_var_screeninfo.yoffset = info->var.yoffset; + if (!memcmp(&info->var, &cached_var_screeninfo, + sizeof(struct fb_var_screeninfo))) { + if(old_yoffset != info->var.yoffset) { + /* we only changed yoffset, and we already + * already recorded it a couple lines up + */ + hdlcd_pan_display(&info->var, info); + } + /* or no change */ + return 0; + } + + hdlcd->fb.fix.line_length = hdlcd->fb.var.xres * bytes_per_pixel; + + if (hdlcd->fb.var.bits_per_pixel >= 16) + hdlcd->fb.fix.visual = FB_VISUAL_TRUECOLOR; + else + hdlcd->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR; + + memcpy(&cached_var_screeninfo, &info->var, sizeof(struct fb_var_screeninfo)); + + polarities = HDLCD_POLARITY_DATAEN | +#ifndef CONFIG_ARCH_TUSCAN + HDLCD_POLARITY_PIXELCLK | +#endif + HDLCD_POLARITY_DATA; + polarities |= (hdlcd->fb.var.sync & FB_SYNC_HOR_HIGH_ACT) ? HDLCD_POLARITY_HSYNC : 0; + polarities |= (hdlcd->fb.var.sync & FB_SYNC_VERT_HIGH_ACT) ? 
HDLCD_POLARITY_VSYNC : 0; + + hdlcd_disable(hdlcd); + + WRITE_HDLCD_REG(HDLCD_REG_FB_LINE_LENGTH, hdlcd->fb.var.xres * bytes_per_pixel); + WRITE_HDLCD_REG(HDLCD_REG_FB_LINE_PITCH, hdlcd->fb.var.xres * bytes_per_pixel); + WRITE_HDLCD_REG(HDLCD_REG_FB_LINE_COUNT, hdlcd->fb.var.yres - 1); + WRITE_HDLCD_REG(HDLCD_REG_V_SYNC, hdlcd->fb.var.vsync_len - 1); + WRITE_HDLCD_REG(HDLCD_REG_V_BACK_PORCH, hdlcd->fb.var.upper_margin - 1); + WRITE_HDLCD_REG(HDLCD_REG_V_DATA, hdlcd->fb.var.yres - 1); + WRITE_HDLCD_REG(HDLCD_REG_V_FRONT_PORCH, hdlcd->fb.var.lower_margin - 1); + WRITE_HDLCD_REG(HDLCD_REG_H_SYNC, hdlcd->fb.var.hsync_len - 1); + WRITE_HDLCD_REG(HDLCD_REG_H_BACK_PORCH, hdlcd->fb.var.left_margin - 1); + WRITE_HDLCD_REG(HDLCD_REG_H_DATA, hdlcd->fb.var.xres - 1); + WRITE_HDLCD_REG(HDLCD_REG_H_FRONT_PORCH, hdlcd->fb.var.right_margin - 1); + WRITE_HDLCD_REG(HDLCD_REG_POLARITIES, polarities); + WRITE_HDLCD_REG(HDLCD_REG_PIXEL_FORMAT, (bytes_per_pixel - 1) << 3); +#ifdef HDLCD_RED_DEFAULT_COLOUR + WRITE_HDLCD_REG(HDLCD_REG_RED_SELECT, (0x00ff0000 | (hdlcd->fb.var.red.length & 0xf) << 8) \ + | hdlcd->fb.var.red.offset); +#else + WRITE_HDLCD_REG(HDLCD_REG_RED_SELECT, ((hdlcd->fb.var.red.length & 0xf) << 8) | hdlcd->fb.var.red.offset); +#endif + WRITE_HDLCD_REG(HDLCD_REG_GREEN_SELECT, ((hdlcd->fb.var.green.length & 0xf) << 8) | hdlcd->fb.var.green.offset); + WRITE_HDLCD_REG(HDLCD_REG_BLUE_SELECT, ((hdlcd->fb.var.blue.length & 0xf) << 8) | hdlcd->fb.var.blue.offset); + + clk_set_rate(hdlcd->clk, (1000000000 / hdlcd->fb.var.pixclock) * 1000); + clk_enable(hdlcd->clk); + + hdlcd_enable(hdlcd); + + return 0; +} + +static int hdlcd_setcolreg(unsigned int regno, unsigned int red, unsigned int green, + unsigned int blue, unsigned int transp, struct fb_info *info) +{ + if (regno < 16) { + u32 *pal = info->pseudo_palette; + + pal[regno] = ((red >> 8) << info->var.red.offset) | + ((green >> 8) << info->var.green.offset) | + ((blue >> 8) << info->var.blue.offset); + } + + return 0; +} + +static irqreturn_t hdlcd_irq(int irq, void *data) +{ + struct hdlcd_device *hdlcd = data; + unsigned long irq_mask, irq_status; + + irq_mask = READ_HDLCD_REG(HDLCD_REG_INT_MASK); + irq_status = READ_HDLCD_REG(HDLCD_REG_INT_STATUS); + + /* acknowledge interrupt(s) */ + WRITE_HDLCD_REG(HDLCD_REG_INT_CLEAR, irq_status); +#ifdef HDLCD_COUNT_BUFFERUNDERRUNS + if (irq_status & HDLCD_INTERRUPT_UNDERRUN) { + /* increment the count */ + hdlcd_underrun_set(hdlcd_underrun_get() + 1); + } +#endif + if (irq_status & HDLCD_INTERRUPT_VSYNC) { + /* disable future VSYNC interrupts */ + WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, irq_mask & ~HDLCD_INTERRUPT_VSYNC); + + complete(&hdlcd->vsync_completion); + } + + return IRQ_HANDLED; +} + +static int hdlcd_wait_for_vsync(struct fb_info *info) +{ + struct hdlcd_device *hdlcd = to_hdlcd_device(info); + unsigned long irq_mask; + int err; + + /* enable VSYNC interrupt */ + irq_mask = READ_HDLCD_REG(HDLCD_REG_INT_MASK); + WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, irq_mask | HDLCD_INTERRUPT_VSYNC); + + err = wait_for_completion_interruptible_timeout(&hdlcd->vsync_completion, + msecs_to_jiffies(100)); + + if (!err) + return -ETIMEDOUT; + + return 0; +} + +static int hdlcd_blank(int blank_mode, struct fb_info *info) +{ + struct hdlcd_device *hdlcd = to_hdlcd_device(info); + + switch (blank_mode) { + case FB_BLANK_POWERDOWN: + clk_disable(hdlcd->clk); + case FB_BLANK_NORMAL: + hdlcd_disable(hdlcd); + break; + case FB_BLANK_UNBLANK: + clk_enable(hdlcd->clk); + hdlcd_enable(hdlcd); + break; + case FB_BLANK_VSYNC_SUSPEND: + 
case FB_BLANK_HSYNC_SUSPEND: + default: + return 1; + } + + return 0; +} + +static void hdlcd_mmap_open(struct vm_area_struct *vma) +{ +} + +static void hdlcd_mmap_close(struct vm_area_struct *vma) +{ +} + +static struct vm_operations_struct hdlcd_mmap_ops = { + .open = hdlcd_mmap_open, + .close = hdlcd_mmap_close, +}; + +static int hdlcd_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + struct hdlcd_device *hdlcd = to_hdlcd_device(info); + unsigned long off; + unsigned long start; + unsigned long len = hdlcd->fb.fix.smem_len; + + if (vma->vm_end - vma->vm_start == 0) + return 0; + if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) + return -EINVAL; + + off = vma->vm_pgoff << PAGE_SHIFT; + if ((off >= len) || (vma->vm_end - vma->vm_start + off) > len) + return -EINVAL; + + start = hdlcd->fb.fix.smem_start; + off += start; + + vma->vm_pgoff = off >> PAGE_SHIFT; + vma->vm_flags |= VM_IO; + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + vma->vm_ops = &hdlcd_mmap_ops; + if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +static int hdlcd_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) +{ + struct hdlcd_device *hdlcd = to_hdlcd_device(info); + + hdlcd->fb.var.yoffset = var->yoffset; + WRITE_HDLCD_REG(HDLCD_REG_FB_BASE, hdlcd->fb.fix.smem_start + + (var->yoffset * hdlcd->fb.fix.line_length)); + + hdlcd_wait_for_vsync(info); + + return 0; +} + +static int hdlcd_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) +{ + int err; + + switch (cmd) { + case FBIO_WAITFORVSYNC: + err = hdlcd_wait_for_vsync(info); + break; + default: + err = -ENOIOCTLCMD; + break; + } + + return err; +} + +static struct fb_ops hdlcd_ops = { + .owner = THIS_MODULE, + .fb_check_var = hdlcd_check_var, + .fb_set_par = hdlcd_set_par, + .fb_setcolreg = hdlcd_setcolreg, + .fb_blank = hdlcd_blank, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_mmap = hdlcd_mmap, + .fb_pan_display = hdlcd_pan_display, + .fb_ioctl = hdlcd_ioctl, + .fb_compat_ioctl = hdlcd_ioctl +}; + +static int hdlcd_setup(struct hdlcd_device *hdlcd) +{ + u32 version; + int err = -EFAULT; + + hdlcd->fb.device = hdlcd->dev; + + hdlcd->clk = clk_get(hdlcd->dev, NULL); + if (IS_ERR(hdlcd->clk)) { + dev_err(hdlcd->dev, "HDLCD: unable to find clock data\n"); + return PTR_ERR(hdlcd->clk); + } + + err = clk_prepare(hdlcd->clk); + if (err) + goto clk_prepare_err; + + hdlcd->base = ioremap_nocache(hdlcd->fb.fix.mmio_start, hdlcd->fb.fix.mmio_len); + if (!hdlcd->base) { + dev_err(hdlcd->dev, "HDLCD: unable to map registers\n"); + goto remap_err; + } + + hdlcd->fb.pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL); + if (!hdlcd->fb.pseudo_palette) { + dev_err(hdlcd->dev, "HDLCD: unable to allocate pseudo_palette memory\n"); + err = -ENOMEM; + goto kmalloc_err; + } + + version = readl(hdlcd->base + HDLCD_REG_VERSION); + if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) { + dev_err(hdlcd->dev, "HDLCD: unknown product id: 0x%x\n", version); + err = -EINVAL; + goto kmalloc_err; + } + dev_info(hdlcd->dev, "HDLCD: found ARM HDLCD version r%dp%d\n", + (version & HDLCD_VERSION_MAJOR_MASK) >> 8, + version & HDLCD_VERSION_MINOR_MASK); + + strcpy(hdlcd->fb.fix.id, "hdlcd"); + hdlcd->fb.fbops = &hdlcd_ops; + hdlcd->fb.flags = FBINFO_FLAG_DEFAULT/* | FBINFO_VIRTFB*/; + + hdlcd->fb.fix.type = FB_TYPE_PACKED_PIXELS; + hdlcd->fb.fix.type_aux = 0; + hdlcd->fb.fix.xpanstep = 0; + 
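+	/*
+	 * Horizontal panning is not supported; vertical panning in
+	 * single-line steps is what hdlcd_pan_display() uses to move the
+	 * scan-out base, e.g. to flip between the two halves of the
+	 * double-height virtual screen when it is enabled.
+	 */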
hdlcd->fb.fix.ypanstep = 1; + hdlcd->fb.fix.ywrapstep = 0; + hdlcd->fb.fix.accel = FB_ACCEL_NONE; + + hdlcd->fb.var.nonstd = 0; + hdlcd->fb.var.activate = FB_ACTIVATE_NOW; + hdlcd->fb.var.height = -1; + hdlcd->fb.var.width = -1; + hdlcd->fb.var.accel_flags = 0; + + init_completion(&hdlcd->vsync_completion); + + if (hdlcd->edid) { + /* build modedb from EDID */ + fb_edid_to_monspecs(hdlcd->edid, &hdlcd->fb.monspecs); + fb_videomode_to_modelist(hdlcd->fb.monspecs.modedb, + hdlcd->fb.monspecs.modedb_len, + &hdlcd->fb.modelist); + fb_find_mode(&hdlcd->fb.var, &hdlcd->fb, fb_mode, + hdlcd->fb.monspecs.modedb, + hdlcd->fb.monspecs.modedb_len, + &hdlcd_default_mode, 32); + } else { + hdlcd->fb.monspecs.hfmin = 0; + hdlcd->fb.monspecs.hfmax = 100000; + hdlcd->fb.monspecs.vfmin = 0; + hdlcd->fb.monspecs.vfmax = 400; + hdlcd->fb.monspecs.dclkmin = 1000000; + hdlcd->fb.monspecs.dclkmax = 100000000; + fb_find_mode(&hdlcd->fb.var, &hdlcd->fb, fb_mode, NULL, 0, &hdlcd_default_mode, 32); + } + + dev_info(hdlcd->dev, "using %dx%d-%d@%d mode\n", hdlcd->fb.var.xres, + hdlcd->fb.var.yres, hdlcd->fb.var.bits_per_pixel, + hdlcd->fb.mode ? hdlcd->fb.mode->refresh : 60); + hdlcd->fb.var.xres_virtual = hdlcd->fb.var.xres; +#ifdef HDLCD_NO_VIRTUAL_SCREEN + hdlcd->fb.var.yres_virtual = hdlcd->fb.var.yres; +#else + hdlcd->fb.var.yres_virtual = hdlcd->fb.var.yres * 2; +#endif + + /* initialise and set the palette */ + if (fb_alloc_cmap(&hdlcd->fb.cmap, NR_PALETTE, 0)) { + dev_err(hdlcd->dev, "failed to allocate cmap memory\n"); + err = -ENOMEM; + goto setup_err; + } + fb_set_cmap(&hdlcd->fb.cmap, &hdlcd->fb); + + /* Allow max number of outstanding requests with the largest beat burst */ + WRITE_HDLCD_REG(HDLCD_REG_BUS_OPTIONS, HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16); + /* Set the framebuffer base to start of allocated memory */ + WRITE_HDLCD_REG(HDLCD_REG_FB_BASE, hdlcd->fb.fix.smem_start); +#ifdef HDLCD_COUNT_BUFFERUNDERRUNS + /* turn on underrun interrupt for counting */ + WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, HDLCD_INTERRUPT_UNDERRUN); +#else + /* Ensure interrupts are disabled */ + WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, 0); +#endif + fb_set_var(&hdlcd->fb, &hdlcd->fb.var); + + if (!register_framebuffer(&hdlcd->fb)) { + return 0; + } + + dev_err(hdlcd->dev, "HDLCD: cannot register framebuffer\n"); + + fb_dealloc_cmap(&hdlcd->fb.cmap); +setup_err: + iounmap(hdlcd->base); +kmalloc_err: + kfree(hdlcd->fb.pseudo_palette); +remap_err: + clk_unprepare(hdlcd->clk); +clk_prepare_err: + clk_put(hdlcd->clk); + return err; +} + +static inline unsigned char atohex(u8 data) +{ + if (!isxdigit(data)) + return 0; + /* truncate the upper nibble and add 9 to non-digit values */ + return (data > 0x39) ? 
((data & 0xf) + 9) : (data & 0xf); +} + +/* EDID data is passed from devicetree in a literal string that can contain spaces and + the hexadecimal dump of the data */ +static int parse_edid_data(struct hdlcd_device *hdlcd, const u8 *edid_data, int data_len) +{ + int i, j; + + if (!edid_data) + return -EINVAL; + + hdlcd->edid = kzalloc(EDID_LENGTH, GFP_KERNEL); + if (!hdlcd->edid) + return -ENOMEM; + + for (i = 0, j = 0; i < data_len; i++) { + if (isspace(edid_data[i])) + continue; + hdlcd->edid[j++] = atohex(edid_data[i]); + if (j >= EDID_LENGTH) + break; + } + + if (j < EDID_LENGTH) { + kfree(hdlcd->edid); + hdlcd->edid = NULL; + return -EINVAL; + } + + return 0; +} + +static int hdlcd_probe(struct platform_device *pdev) +{ + int err = 0, i; + struct hdlcd_device *hdlcd; + struct resource *mem; +#ifdef CONFIG_OF + struct device_node *of_node; +#endif + + memset(&cached_var_screeninfo, 0, sizeof(struct fb_var_screeninfo)); + + dev_dbg(&pdev->dev, "HDLCD: probing\n"); + + hdlcd = kzalloc(sizeof(*hdlcd), GFP_KERNEL); + if (!hdlcd) + return -ENOMEM; + +#ifdef CONFIG_OF + of_node = pdev->dev.of_node; + if (of_node) { + int len; + const u8 *edid; + const __be32 *prop = of_get_property(of_node, "mode", &len); + if (prop) + strncpy(fb_mode, (char *)prop, len); + prop = of_get_property(of_node, "framebuffer", &len); + if (prop) { + hdlcd->fb.fix.smem_start = of_read_ulong(prop, + of_n_addr_cells(of_node)); + prop += of_n_addr_cells(of_node); + framebuffer_size = of_read_ulong(prop, + of_n_size_cells(of_node)); + if (framebuffer_size > HDLCD_MAX_FRAMEBUFFER_SIZE) + framebuffer_size = HDLCD_MAX_FRAMEBUFFER_SIZE; + dev_dbg(&pdev->dev, "HDLCD: phys_addr = 0x%lx, size = 0x%lx\n", + hdlcd->fb.fix.smem_start, framebuffer_size); + } + edid = of_get_property(of_node, "edid", &len); + if (edid) { + err = parse_edid_data(hdlcd, edid, len); +#ifdef CONFIG_SERIAL_AMBA_PCU_UART + } else { + /* ask the firmware to fetch the EDID */ + dev_dbg(&pdev->dev, "HDLCD: Requesting EDID data\n"); + hdlcd->edid = kzalloc(EDID_LENGTH, GFP_KERNEL); + if (!hdlcd->edid) + return -ENOMEM; + err = get_edid(hdlcd->edid); +#endif /* CONFIG_SERIAL_AMBA_PCU_UART */ + } + if (err) + dev_info(&pdev->dev, "HDLCD: Failed to parse EDID data\n"); + } +#endif /* CONFIG_OF */ + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "HDLCD: cannot get platform resources\n"); + err = -EINVAL; + goto resource_err; + } + + i = platform_get_irq(pdev, 0); + if (i < 0) { + dev_err(&pdev->dev, "HDLCD: no irq defined for vsync\n"); + err = -ENOENT; + goto resource_err; + } else { + err = request_irq(i, hdlcd_irq, 0, dev_name(&pdev->dev), hdlcd); + if (err) { + dev_err(&pdev->dev, "HDLCD: unable to request irq\n"); + goto resource_err; + } + hdlcd->irq = i; + } + + if (!request_mem_region(mem->start, resource_size(mem), dev_name(&pdev->dev))) { + err = -ENXIO; + goto request_err; + } + + if (!hdlcd->fb.fix.smem_start) { + dev_err(&pdev->dev, "platform did not allocate frame buffer memory\n"); + err = -ENOMEM; + goto memalloc_err; + } + hdlcd->fb.screen_base = ioremap_wc(hdlcd->fb.fix.smem_start, framebuffer_size); + if (!hdlcd->fb.screen_base) { + dev_err(&pdev->dev, "unable to ioremap framebuffer\n"); + err = -ENOMEM; + goto probe_err; + } + + hdlcd->fb.screen_size = framebuffer_size; + hdlcd->fb.fix.smem_len = framebuffer_size; + hdlcd->fb.fix.mmio_start = mem->start; + hdlcd->fb.fix.mmio_len = resource_size(mem); + + /* Clear the framebuffer */ + memset(hdlcd->fb.screen_base, 0, framebuffer_size); + + hdlcd->dev 
= &pdev->dev; + + dev_dbg(&pdev->dev, "HDLCD: framebuffer virt base %p, phys base 0x%lX\n", + hdlcd->fb.screen_base, (unsigned long)hdlcd->fb.fix.smem_start); + + err = hdlcd_setup(hdlcd); + + if (err) + goto probe_err; + + platform_set_drvdata(pdev, hdlcd); + return 0; + +probe_err: + iounmap(hdlcd->fb.screen_base); + memblock_free(hdlcd->fb.fix.smem_start, hdlcd->fb.fix.smem_start); + +memalloc_err: + release_mem_region(mem->start, resource_size(mem)); + +request_err: + free_irq(hdlcd->irq, hdlcd); + +resource_err: + kfree(hdlcd); + + return err; +} + +static int hdlcd_remove(struct platform_device *pdev) +{ + struct hdlcd_device *hdlcd = platform_get_drvdata(pdev); + + clk_disable(hdlcd->clk); + clk_unprepare(hdlcd->clk); + clk_put(hdlcd->clk); + + /* unmap memory */ + iounmap(hdlcd->fb.screen_base); + iounmap(hdlcd->base); + + /* deallocate fb memory */ + fb_dealloc_cmap(&hdlcd->fb.cmap); + kfree(hdlcd->fb.pseudo_palette); + memblock_free(hdlcd->fb.fix.smem_start, hdlcd->fb.fix.smem_start); + release_mem_region(hdlcd->fb.fix.mmio_start, hdlcd->fb.fix.mmio_len); + + free_irq(hdlcd->irq, NULL); + kfree(hdlcd); + + return 0; +} + +#ifdef CONFIG_PM +static int hdlcd_suspend(struct platform_device *pdev, pm_message_t state) +{ + /* not implemented yet */ + return 0; +} + +static int hdlcd_resume(struct platform_device *pdev) +{ + /* not implemented yet */ + return 0; +} +#else +#define hdlcd_suspend NULL +#define hdlcd_resume NULL +#endif + +static struct platform_driver hdlcd_driver = { + .probe = hdlcd_probe, + .remove = hdlcd_remove, + .suspend = hdlcd_suspend, + .resume = hdlcd_resume, + .driver = { + .name = "hdlcd", + .owner = THIS_MODULE, + .of_match_table = hdlcd_of_matches, + }, +}; + +static int __init hdlcd_init(void) +{ +#ifdef HDLCD_COUNT_BUFFERUNDERRUNS + int err = platform_driver_register(&hdlcd_driver); + if (!err) + hdlcd_underrun_init(); + return err; +#else + return platform_driver_register(&hdlcd_driver); +#endif +} + +void __exit hdlcd_exit(void) +{ +#ifdef HDLCD_COUNT_BUFFERUNDERRUNS + hdlcd_underrun_close(); +#endif + platform_driver_unregister(&hdlcd_driver); +} + +module_init(hdlcd_init); +module_exit(hdlcd_exit); + +MODULE_AUTHOR("Liviu Dudau"); +MODULE_DESCRIPTION("ARM HDLCD core driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/video/vexpress-dvi.c b/drivers/video/vexpress-dvi.c new file mode 100644 index 00000000000..f08753450ee --- /dev/null +++ b/drivers/video/vexpress-dvi.c @@ -0,0 +1,220 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2012 ARM Limited + */ + +#define pr_fmt(fmt) "vexpress-dvi: " fmt + +#include <linux/fb.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/vexpress.h> + + +static struct vexpress_config_func *vexpress_dvimode_func; + +static struct { + u32 xres, yres, mode; +} vexpress_dvi_dvimodes[] = { + { 640, 480, 0 }, /* VGA */ + { 800, 600, 1 }, /* SVGA */ + { 1024, 768, 2 }, /* XGA */ + { 1280, 1024, 3 }, /* SXGA */ + { 1600, 1200, 4 }, /* UXGA */ + { 1920, 1080, 5 }, /* HD1080 */ +}; + +static void vexpress_dvi_mode_set(struct fb_info *info, u32 xres, u32 yres) +{ + int err = -ENOENT; + int i; + + if (!vexpress_dvimode_func) + return; + + for (i = 0; i < ARRAY_SIZE(vexpress_dvi_dvimodes); i++) { + if (vexpress_dvi_dvimodes[i].xres == xres && + vexpress_dvi_dvimodes[i].yres == yres) { + pr_debug("mode: %ux%u = %d\n", xres, yres, + vexpress_dvi_dvimodes[i].mode); + err = vexpress_config_write(vexpress_dvimode_func, 0, + vexpress_dvi_dvimodes[i].mode); + break; + } + } + + if (err) + pr_warn("Failed to set %ux%u mode! (%d)\n", xres, yres, err); +} + + +static struct vexpress_config_func *vexpress_muxfpga_func; +static int vexpress_dvi_fb = -1; + +static int vexpress_dvi_mux_set(struct fb_info *info) +{ + int err; + u32 site = vexpress_get_site_by_dev(info->device); + + if (!vexpress_muxfpga_func) + return -ENXIO; + + err = vexpress_config_write(vexpress_muxfpga_func, 0, site); + if (!err) { + pr_debug("Selected MUXFPGA input %d (fb%d)\n", site, + info->node); + vexpress_dvi_fb = info->node; + vexpress_dvi_mode_set(info, info->var.xres, + info->var.yres); + } else { + pr_warn("Failed to select MUXFPGA input %d (fb%d)! (%d)\n", + site, info->node, err); + } + + return err; +} + +static int vexpress_dvi_fb_select(int fb) +{ + int err; + struct fb_info *info; + + /* fb0 is the default */ + if (fb < 0) + fb = 0; + + info = registered_fb[fb]; + if (!info || !lock_fb_info(info)) + return -ENODEV; + + err = vexpress_dvi_mux_set(info); + + unlock_fb_info(info); + + return err; +} + +static ssize_t vexpress_dvi_fb_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", vexpress_dvi_fb); +} + +static ssize_t vexpress_dvi_fb_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + long value; + int err = kstrtol(buf, 0, &value); + + if (!err) + err = vexpress_dvi_fb_select(value); + + return err ? 
err : count; +} + +DEVICE_ATTR(fb, S_IRUGO | S_IWUSR, vexpress_dvi_fb_show, + vexpress_dvi_fb_store); + + +static int vexpress_dvi_fb_event_notify(struct notifier_block *self, + unsigned long action, void *data) +{ + struct fb_event *event = data; + struct fb_info *info = event->info; + struct fb_videomode *mode = event->data; + + switch (action) { + case FB_EVENT_FB_REGISTERED: + if (vexpress_dvi_fb < 0) + vexpress_dvi_mux_set(info); + break; + case FB_EVENT_MODE_CHANGE: + case FB_EVENT_MODE_CHANGE_ALL: + if (info->node == vexpress_dvi_fb) + vexpress_dvi_mode_set(info, mode->xres, mode->yres); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block vexpress_dvi_fb_notifier = { + .notifier_call = vexpress_dvi_fb_event_notify, +}; +static bool vexpress_dvi_fb_notifier_registered; + + +enum vexpress_dvi_func { FUNC_MUXFPGA, FUNC_DVIMODE }; + +static struct of_device_id vexpress_dvi_of_match[] = { + { + .compatible = "arm,vexpress-muxfpga", + .data = (void *)FUNC_MUXFPGA, + }, { + .compatible = "arm,vexpress-dvimode", + .data = (void *)FUNC_DVIMODE, + }, + {} +}; + +static int vexpress_dvi_probe(struct platform_device *pdev) +{ + enum vexpress_dvi_func func; + const struct of_device_id *match = + of_match_device(vexpress_dvi_of_match, &pdev->dev); + + if (match) + func = (enum vexpress_dvi_func)match->data; + else + func = pdev->id_entry->driver_data; + + switch (func) { + case FUNC_MUXFPGA: + vexpress_muxfpga_func = + vexpress_config_func_get_by_dev(&pdev->dev); + device_create_file(&pdev->dev, &dev_attr_fb); + break; + case FUNC_DVIMODE: + vexpress_dvimode_func = + vexpress_config_func_get_by_dev(&pdev->dev); + break; + } + + if (!vexpress_dvi_fb_notifier_registered) { + fb_register_client(&vexpress_dvi_fb_notifier); + vexpress_dvi_fb_notifier_registered = true; + } + + vexpress_dvi_fb_select(vexpress_dvi_fb); + + return 0; +} + +static const struct platform_device_id vexpress_dvi_id_table[] = { + { .name = "vexpress-muxfpga", .driver_data = FUNC_MUXFPGA, }, + { .name = "vexpress-dvimode", .driver_data = FUNC_DVIMODE, }, + {} +}; + +static struct platform_driver vexpress_dvi_driver = { + .probe = vexpress_dvi_probe, + .driver = { + .name = "vexpress-dvi", + .of_match_table = vexpress_dvi_of_match, + }, + .id_table = vexpress_dvi_id_table, +}; + +static int __init vexpress_dvi_init(void) +{ + return platform_driver_register(&vexpress_dvi_driver); +} +device_initcall(vexpress_dvi_init); |
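For reference, the FBIO_WAITFORVSYNC path wired up in hdlcd_ioctl() above is the standard fbdev vsync-wait ioctl from <linux/fb.h>, so it can be exercised with a small userspace test. The sketch below is illustrative only and not part of the patch; it assumes the HDLCD instance comes up as /dev/fb0, which depends on probe order on the target.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fb.h>

int main(void)
{
	uint32_t crtc = 0;			/* conventional argument, ignored by hdlcd */
	int fd = open("/dev/fb0", O_RDWR);	/* assumed HDLCD framebuffer node */

	if (fd < 0) {
		perror("open /dev/fb0");
		return 1;
	}

	/* Block until the next vertical sync interrupt is signalled. */
	if (ioctl(fd, FBIO_WAITFORVSYNC, &crtc) < 0)
		perror("FBIO_WAITFORVSYNC");

	close(fd);
	return 0;
}

The driver does not read the ioctl argument; it enables the VSYNC interrupt and waits (with a 100 ms timeout) for the completion signalled from hdlcd_irq(), so crtc is passed here only to match the conventional calling form.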