Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/early_ioremap.h  42
-rw-r--r--  include/asm-generic/vmlinux.lds.h  11
-rw-r--r--  include/asm-generic/word-at-a-time.h  2
-rw-r--r--  include/crypto/algapi.h  9
-rw-r--r--  include/kvm/arm_arch_timer.h  14
-rw-r--r--  include/kvm/arm_vgic.h  227
-rw-r--r--  include/linux/amba/bus.h  1
-rw-r--r--  include/linux/amba/clcd.h  10
-rw-r--r--  include/linux/arm-hdlcd.h  122
-rw-r--r--  include/linux/clk-provider.h  3
-rw-r--r--  include/linux/coresight.h  251
-rw-r--r--  include/linux/cpufeature.h  60
-rw-r--r--  include/linux/efi.h  293
-rw-r--r--  include/linux/ftrace.h  34
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h  200
-rw-r--r--  include/linux/irqchip/arm-gic.h  9
-rw-r--r--  include/linux/kvm_host.h  71
-rw-r--r--  include/linux/kvm_types.h  14
-rw-r--r--  include/linux/mailbox_client.h  46
-rw-r--r--  include/linux/mailbox_controller.h  133
-rw-r--r--  include/linux/mod_devicetable.h  9
-rw-r--r--  include/linux/of.h  315
-rw-r--r--  include/linux/of_address.h  1
-rw-r--r--  include/linux/of_fdt.h  16
-rw-r--r--  include/linux/of_graph.h  65
-rw-r--r--  include/linux/of_platform.h  8
-rw-r--r--  include/linux/of_reserved_mem.h  53
-rw-r--r--  include/linux/pl320-ipc.h (renamed from include/linux/mailbox.h)  0
-rw-r--r--  include/linux/pm.h  1
-rw-r--r--  include/linux/pm_domain.h  52
-rw-r--r--  include/linux/serial_core.h  16
-rw-r--r--  include/linux/usb/xhci_pdriver.h  27
-rw-r--r--  include/trace/events/kvm.h  8
-rw-r--r--  include/uapi/linux/Kbuild  1
-rw-r--r--  include/uapi/linux/kvm.h  40
-rw-r--r--  include/uapi/linux/psci.h  90
36 files changed, 2097 insertions, 157 deletions
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
new file mode 100644
index 000000000000..a5de55c04fb2
--- /dev/null
+++ b/include/asm-generic/early_ioremap.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_EARLY_IOREMAP_H_
+#define _ASM_EARLY_IOREMAP_H_
+
+#include <linux/types.h>
+
+/*
+ * early_ioremap() and early_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ */
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+ unsigned long size);
+extern void *early_memremap(resource_size_t phys_addr,
+ unsigned long size);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
+extern void early_memunmap(void *addr, unsigned long size);
+
+/*
+ * Weak function called by early_ioremap_reset(). It does nothing, but
+ * architectures may provide their own version to do any needed cleanups.
+ */
+extern void early_ioremap_shutdown(void);
+
+#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
+/* Arch-specific initialization */
+extern void early_ioremap_init(void);
+
+/* Generic initialization called by architecture code */
+extern void early_ioremap_setup(void);
+
+/*
+ * Called as last step in paging_init() so library can act
+ * accordingly for subsequent map/unmap requests.
+ */
+extern void early_ioremap_reset(void);
+
+#else
+static inline void early_ioremap_init(void) { }
+static inline void early_ioremap_setup(void) { }
+static inline void early_ioremap_reset(void) { }
+#endif
+
+#endif /* _ASM_EARLY_IOREMAP_H_ */
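[note] The header above only declares the generic API. As a rough usage sketch (not part of the patch; UART_PHYS_BASE and the register offset are assumptions), an architecture's early boot code can peek at a device register before the real ioremap() is up:

    /* hedged sketch -- UART_PHYS_BASE is illustrative, not defined by this header */
    static void __init report_uart_id(void)
    {
            void __iomem *base;
            u32 id;

            base = early_ioremap(UART_PHYS_BASE, SZ_4K);  /* temporary boot-time mapping */
            if (!base)
                    return;
            id = readl(base + 0xfe0);                     /* e.g. a peripheral ID register */
            early_iounmap(base, SZ_4K);                   /* tear down while still in early boot */
            pr_info("UART peripheral id: %#x\n", id);
    }

Mappings made this way should be released with early_iounmap()/early_memunmap() before early_ioremap_reset() runs at the end of paging_init().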
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index bc2121fa9132..f10f64fcc815 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -167,6 +167,16 @@
#define CLK_OF_TABLES()
#endif
+#ifdef CONFIG_OF_RESERVED_MEM
+#define RESERVEDMEM_OF_TABLES() \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__reservedmem_of_table) = .; \
+ *(__reservedmem_of_table) \
+ *(__reservedmem_of_table_end)
+#else
+#define RESERVEDMEM_OF_TABLES()
+#endif
+
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__dtb_start) = .; \
@@ -490,6 +500,7 @@
TRACE_SYSCALLS() \
MEM_DISCARD(init.rodata) \
CLK_OF_TABLES() \
+ RESERVEDMEM_OF_TABLES() \
CLKSRC_OF_TABLES() \
KERNEL_DTB() \
IRQCHIP_OF_MATCH_TABLE()
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index d96deb443f18..94f9ea8abcae 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -50,7 +50,7 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
}
#ifndef zero_bytemask
-#define zero_bytemask(mask) (~0ul << __fls(mask) << 1)
+#define zero_bytemask(mask) (~1ul << __fls(mask))
#endif
#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index e73c19e90e38..016c2f110f63 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -100,9 +100,12 @@ struct blkcipher_walk {
void *page;
u8 *buffer;
u8 *iv;
+ unsigned int ivsize;
int flags;
- unsigned int blocksize;
+ unsigned int walk_blocksize;
+ unsigned int cipher_blocksize;
+ unsigned int alignmask;
};
struct ablkcipher_walk {
@@ -192,6 +195,10 @@ int blkcipher_walk_phys(struct blkcipher_desc *desc,
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
unsigned int blocksize);
+int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk,
+ struct crypto_aead *tfm,
+ unsigned int blocksize);
int ablkcipher_walk_done(struct ablkcipher_request *req,
struct ablkcipher_walk *walk, int err);
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 6d9aeddc09bf..ad9db6045b2f 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -67,6 +67,10 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
+
+u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
+int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
+
#else
static inline int kvm_timer_hyp_init(void)
{
@@ -84,6 +88,16 @@ static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {}
+
+static inline int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
+{
+ return 0;
+}
+
+static inline u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
+{
+ return 0;
+}
#endif
#endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index f27000f55a83..2f2aac8448a4 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -24,26 +24,26 @@
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/irqchip/arm-gic.h>
-#define VGIC_NR_IRQS 256
+#define VGIC_NR_IRQS_LEGACY 256
#define VGIC_NR_SGIS 16
#define VGIC_NR_PPIS 16
#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
-#define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS)
-#define VGIC_MAX_CPUS KVM_MAX_VCPUS
-#define VGIC_MAX_LRS (1 << 6)
+
+#define VGIC_V2_MAX_LRS (1 << 6)
+#define VGIC_V3_MAX_LRS 16
+#define VGIC_MAX_IRQS 1024
/* Sanity checks... */
-#if (VGIC_MAX_CPUS > 8)
+#if (KVM_MAX_VCPUS > 8)
#error Invalid number of CPU interfaces
#endif
-#if (VGIC_NR_IRQS & 31)
+#if (VGIC_NR_IRQS_LEGACY & 31)
#error "VGIC_NR_IRQS must be a multiple of 32"
#endif
-#if (VGIC_NR_IRQS > 1024)
+#if (VGIC_NR_IRQS_LEGACY > VGIC_MAX_IRQS)
#error "VGIC_NR_IRQS must be <= 1024"
#endif
@@ -53,26 +53,96 @@
* - a bunch of shared interrupts (SPI)
*/
struct vgic_bitmap {
- union {
- u32 reg[VGIC_NR_PRIVATE_IRQS / 32];
- DECLARE_BITMAP(reg_ul, VGIC_NR_PRIVATE_IRQS);
- } percpu[VGIC_MAX_CPUS];
- union {
- u32 reg[VGIC_NR_SHARED_IRQS / 32];
- DECLARE_BITMAP(reg_ul, VGIC_NR_SHARED_IRQS);
- } shared;
+ /*
+ * - One UL per VCPU for private interrupts (assumes UL is at
+ * least 32 bits)
+ * - As many UL as necessary for shared interrupts.
+ *
+ * The private interrupts are accessed via the "private"
+ * field, one UL per vcpu (the state for vcpu n is in
+ * private[n]). The shared interrupts are accessed via the
+ * "shared" pointer (IRQn state is at bit n-32 in the bitmap).
+ */
+ unsigned long *private;
+ unsigned long *shared;
};
struct vgic_bytemap {
- u32 percpu[VGIC_MAX_CPUS][VGIC_NR_PRIVATE_IRQS / 4];
- u32 shared[VGIC_NR_SHARED_IRQS / 4];
+ /*
+ * - 8 u32 per VCPU for private interrupts
+ * - As many u32 as necessary for shared interrupts.
+ *
+ * The private interrupts are accessed via the "private"
+ * field, (the state for vcpu n is in private[n*8] to
+ * private[n*8 + 7]). The shared interrupts are accessed via
+ * the "shared" pointer (IRQn state is at byte (n-32)%4 of the
+ * shared[(n-32)/4] word).
+ */
+ u32 *private;
+ u32 *shared;
+};
+
+struct kvm_vcpu;
+
+enum vgic_type {
+ VGIC_V2, /* Good ol' GICv2 */
+ VGIC_V3, /* New fancy GICv3 */
+};
+
+#define LR_STATE_PENDING (1 << 0)
+#define LR_STATE_ACTIVE (1 << 1)
+#define LR_STATE_MASK (3 << 0)
+#define LR_EOI_INT (1 << 2)
+
+struct vgic_lr {
+ u16 irq;
+ u8 source;
+ u8 state;
+};
+
+struct vgic_vmcr {
+ u32 ctlr;
+ u32 abpr;
+ u32 bpr;
+ u32 pmr;
+};
+
+struct vgic_ops {
+ struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
+ void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
+ void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
+ u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
+ u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
+ u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
+ void (*enable_underflow)(struct kvm_vcpu *vcpu);
+ void (*disable_underflow)(struct kvm_vcpu *vcpu);
+ void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+ void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+ void (*enable)(struct kvm_vcpu *vcpu);
+};
+
+struct vgic_params {
+ /* vgic type */
+ enum vgic_type type;
+ /* Physical address of vgic virtual cpu interface */
+ phys_addr_t vcpu_base;
+ /* Number of list registers */
+ u32 nr_lr;
+ /* Interrupt number */
+ unsigned int maint_irq;
+ /* Virtual control interface base address */
+ void __iomem *vctrl_base;
};
struct vgic_dist {
#ifdef CONFIG_KVM_ARM_VGIC
spinlock_t lock;
+ bool in_kernel;
bool ready;
+ int nr_cpus;
+ int nr_irqs;
+
/* Virtual control interface mapping */
void __iomem *vctrl_base;
@@ -86,11 +156,25 @@ struct vgic_dist {
/* Interrupt enabled (one bit per IRQ) */
struct vgic_bitmap irq_enabled;
- /* Interrupt 'pin' level */
- struct vgic_bitmap irq_state;
+ /* Level-triggered interrupt external input is asserted */
+ struct vgic_bitmap irq_level;
+
+ /*
+ * Interrupt state is pending on the distributor
+ */
+ struct vgic_bitmap irq_pending;
- /* Level-triggered interrupt in progress */
- struct vgic_bitmap irq_active;
+ /*
+ * Tracks writes to GICD_ISPENDRn and GICD_ICPENDRn for level-triggered
+ * interrupts. Essentially holds the state of the flip-flop in
+ * Figure 4-10 on page 4-101 in ARM IHI 0048B.b.
+ * Once set, it is only cleared for level-triggered interrupts on
+ * guest ACKs (when we queue it) or writes to GICD_ICPENDRn.
+ */
+ struct vgic_bitmap irq_soft_pend;
+
+ /* Level-triggered interrupt queued on VCPU interface */
+ struct vgic_bitmap irq_queued;
/* Interrupt priority. Not used yet. */
struct vgic_bytemap irq_priority;
@@ -98,46 +182,90 @@ struct vgic_dist {
/* Level/edge triggered */
struct vgic_bitmap irq_cfg;
- /* Source CPU per SGI and target CPU */
- u8 irq_sgi_sources[VGIC_MAX_CPUS][VGIC_NR_SGIS];
-
- /* Target CPU for each IRQ */
- u8 irq_spi_cpu[VGIC_NR_SHARED_IRQS];
- struct vgic_bitmap irq_spi_target[VGIC_MAX_CPUS];
+ /*
+ * Source CPU per SGI and target CPU:
+ *
+ * Each byte represent a SGI observable on a VCPU, each bit of
+ * this byte indicating if the corresponding VCPU has
+ * generated this interrupt. This is a GICv2 feature only.
+ *
+ * For VCPUn (n < 8), irq_sgi_sources[n*16] to [n*16 + 15] are
+ * the SGIs observable on VCPUn.
+ */
+ u8 *irq_sgi_sources;
+
+ /*
+ * Target CPU for each SPI:
+ *
+ * Array of available SPI, each byte indicating the target
+ * VCPU for SPI. IRQn (n >=32) is at irq_spi_cpu[n-32].
+ */
+ u8 *irq_spi_cpu;
+
+ /*
+ * Reverse lookup of irq_spi_cpu for faster compute pending:
+ *
+ * Array of bitmaps, one per VCPU, describing if IRQn is
+ * routed to a particular VCPU.
+ */
+ struct vgic_bitmap *irq_spi_target;
/* Bitmap indicating which CPU has something pending */
- unsigned long irq_pending_on_cpu;
+ unsigned long *irq_pending_on_cpu;
+#endif
+};
+
+struct vgic_v2_cpu_if {
+ u32 vgic_hcr;
+ u32 vgic_vmcr;
+ u32 vgic_misr; /* Saved only */
+ u32 vgic_eisr[2]; /* Saved only */
+ u32 vgic_elrsr[2]; /* Saved only */
+ u32 vgic_apr;
+ u32 vgic_lr[VGIC_V2_MAX_LRS];
+};
+
+struct vgic_v3_cpu_if {
+#ifdef CONFIG_ARM_GIC_V3
+ u32 vgic_hcr;
+ u32 vgic_vmcr;
+ u32 vgic_misr; /* Saved only */
+ u32 vgic_eisr; /* Saved only */
+ u32 vgic_elrsr; /* Saved only */
+ u32 vgic_ap0r[4];
+ u32 vgic_ap1r[4];
+ u64 vgic_lr[VGIC_V3_MAX_LRS];
#endif
};
struct vgic_cpu {
#ifdef CONFIG_KVM_ARM_VGIC
/* per IRQ to LR mapping */
- u8 vgic_irq_lr_map[VGIC_NR_IRQS];
+ u8 *vgic_irq_lr_map;
/* Pending interrupts on this VCPU */
DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS);
- DECLARE_BITMAP( pending_shared, VGIC_NR_SHARED_IRQS);
+ unsigned long *pending_shared;
/* Bitmap of used/free list registers */
- DECLARE_BITMAP( lr_used, VGIC_MAX_LRS);
+ DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS);
/* Number of list registers on this CPU */
int nr_lr;
/* CPU vif control registers for world switch */
- u32 vgic_hcr;
- u32 vgic_vmcr;
- u32 vgic_misr; /* Saved only */
- u32 vgic_eisr[2]; /* Saved only */
- u32 vgic_elrsr[2]; /* Saved only */
- u32 vgic_apr;
- u32 vgic_lr[VGIC_MAX_LRS];
+ union {
+ struct vgic_v2_cpu_if vgic_v2;
+ struct vgic_v3_cpu_if vgic_v3;
+ };
#endif
};
#define LR_EMPTY 0xff
+#define INT_STATUS_EOI (1 << 0)
+#define INT_STATUS_UNDERFLOW (1 << 1)
+
struct kvm;
struct kvm_vcpu;
struct kvm_run;
@@ -148,7 +276,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_init(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm);
-int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_vgic_destroy(struct kvm *kvm);
+void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
@@ -157,9 +286,25 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_exit_mmio *mmio);
-#define irqchip_in_kernel(k) (!!((k)->arch.vgic.vctrl_base))
+#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) ((k)->arch.vgic.ready)
+int vgic_v2_probe(struct device_node *vgic_node,
+ const struct vgic_ops **ops,
+ const struct vgic_params **params);
+#ifdef CONFIG_ARM_GIC_V3
+int vgic_v3_probe(struct device_node *vgic_node,
+ const struct vgic_ops **ops,
+ const struct vgic_params **params);
+#else
+static inline int vgic_v3_probe(struct device_node *vgic_node,
+ const struct vgic_ops **ops,
+ const struct vgic_params **params)
+{
+ return -ENODEV;
+}
+#endif
+
#else
static inline int kvm_vgic_hyp_init(void)
{
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 63b5eff0a80f..c814346ba93c 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -23,6 +23,7 @@
#define AMBA_NR_IRQS 9
#define AMBA_CID 0xb105f00d
+#define CORESIGHT_CID 0xb105900d
struct clk;
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
index e82e3ee2c54a..4a53250f970e 100644
--- a/include/linux/amba/clcd.h
+++ b/include/linux/amba/clcd.h
@@ -243,6 +243,9 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
val |= CNTL_BGR;
}
+ /* Reset the current colour depth */
+ val &= ~CNTL_LCDBPP16_444;
+
switch (var->bits_per_pixel) {
case 1:
val |= CNTL_LCDBPP1;
@@ -264,14 +267,15 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
*/
if (amba_part(fb->dev) == 0x110 ||
var->green.length == 5)
- val |= CNTL_LCDBPP16;
+ val |= CNTL_LCDBPP16 | CNTL_BGR;
else if (var->green.length == 6)
- val |= CNTL_LCDBPP16_565;
+ val |= CNTL_LCDBPP16_565 | CNTL_BGR;
else
- val |= CNTL_LCDBPP16_444;
+ val |= CNTL_LCDBPP16_444 | CNTL_BGR;
break;
case 32:
val |= CNTL_LCDBPP24;
+ val &= ~CNTL_BGR;
break;
}
diff --git a/include/linux/arm-hdlcd.h b/include/linux/arm-hdlcd.h
new file mode 100644
index 000000000000..939f3a81d56b
--- /dev/null
+++ b/include/linux/arm-hdlcd.h
@@ -0,0 +1,122 @@
+/*
+ * include/linux/arm-hdlcd.h
+ *
+ * Copyright (C) 2011 ARM Limited
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * ARM HDLCD Controller register definition
+ */
+
+#include <linux/fb.h>
+#include <linux/completion.h>
+
+/* register offsets */
+#define HDLCD_REG_VERSION 0x0000 /* ro */
+#define HDLCD_REG_INT_RAWSTAT 0x0010 /* rw */
+#define HDLCD_REG_INT_CLEAR 0x0014 /* wo */
+#define HDLCD_REG_INT_MASK 0x0018 /* rw */
+#define HDLCD_REG_INT_STATUS 0x001c /* ro */
+#define HDLCD_REG_USER_OUT 0x0020 /* rw */
+#define HDLCD_REG_FB_BASE 0x0100 /* rw */
+#define HDLCD_REG_FB_LINE_LENGTH 0x0104 /* rw */
+#define HDLCD_REG_FB_LINE_COUNT 0x0108 /* rw */
+#define HDLCD_REG_FB_LINE_PITCH 0x010c /* rw */
+#define HDLCD_REG_BUS_OPTIONS 0x0110 /* rw */
+#define HDLCD_REG_V_SYNC 0x0200 /* rw */
+#define HDLCD_REG_V_BACK_PORCH 0x0204 /* rw */
+#define HDLCD_REG_V_DATA 0x0208 /* rw */
+#define HDLCD_REG_V_FRONT_PORCH 0x020c /* rw */
+#define HDLCD_REG_H_SYNC 0x0210 /* rw */
+#define HDLCD_REG_H_BACK_PORCH 0x0214 /* rw */
+#define HDLCD_REG_H_DATA 0x0218 /* rw */
+#define HDLCD_REG_H_FRONT_PORCH 0x021c /* rw */
+#define HDLCD_REG_POLARITIES 0x0220 /* rw */
+#define HDLCD_REG_COMMAND 0x0230 /* rw */
+#define HDLCD_REG_PIXEL_FORMAT 0x0240 /* rw */
+#define HDLCD_REG_BLUE_SELECT 0x0244 /* rw */
+#define HDLCD_REG_GREEN_SELECT 0x0248 /* rw */
+#define HDLCD_REG_RED_SELECT 0x024c /* rw */
+
+/* version */
+#define HDLCD_PRODUCT_ID 0x1CDC0000
+#define HDLCD_PRODUCT_MASK 0xFFFF0000
+#define HDLCD_VERSION_MAJOR_MASK 0x0000FF00
+#define HDLCD_VERSION_MINOR_MASK 0x000000FF
+
+/* interrupts */
+#define HDLCD_INTERRUPT_DMA_END (1 << 0)
+#define HDLCD_INTERRUPT_BUS_ERROR (1 << 1)
+#define HDLCD_INTERRUPT_VSYNC (1 << 2)
+#define HDLCD_INTERRUPT_UNDERRUN (1 << 3)
+
+/* polarity */
+#define HDLCD_POLARITY_VSYNC (1 << 0)
+#define HDLCD_POLARITY_HSYNC (1 << 1)
+#define HDLCD_POLARITY_DATAEN (1 << 2)
+#define HDLCD_POLARITY_DATA (1 << 3)
+#define HDLCD_POLARITY_PIXELCLK (1 << 4)
+
+/* commands */
+#define HDLCD_COMMAND_DISABLE (0 << 0)
+#define HDLCD_COMMAND_ENABLE (1 << 0)
+
+/* pixel format */
+#define HDLCD_PIXEL_FMT_LITTLE_ENDIAN (0 << 31)
+#define HDLCD_PIXEL_FMT_BIG_ENDIAN (1 << 31)
+#define HDLCD_BYTES_PER_PIXEL_MASK (3 << 3)
+
+/* bus options */
+#define HDLCD_BUS_BURST_MASK 0x01f
+#define HDLCD_BUS_MAX_OUTSTAND 0xf00
+#define HDLCD_BUS_BURST_NONE (0 << 0)
+#define HDLCD_BUS_BURST_1 (1 << 0)
+#define HDLCD_BUS_BURST_2 (1 << 1)
+#define HDLCD_BUS_BURST_4 (1 << 2)
+#define HDLCD_BUS_BURST_8 (1 << 3)
+#define HDLCD_BUS_BURST_16 (1 << 4)
+
+/* Max resolution supported is 4096x4096, 8 bit per color component,
+ 8 bit alpha, but we are going to choose the usual hardware default
+ (2048x2048, 32 bpp) and enable double buffering */
+#define HDLCD_MAX_XRES 2048
+#define HDLCD_MAX_YRES 2048
+#define HDLCD_MAX_FRAMEBUFFER_SIZE (HDLCD_MAX_XRES * HDLCD_MAX_YRES << 2)
+
+#define HDLCD_MEM_BASE (CONFIG_PAGE_OFFSET - 0x1000000)
+
+#define NR_PALETTE 256
+
+/* OEMs using HDLCD may wish to enable these settings if
+ * display disruption is apparent and you suspect HDLCD
+ * access to RAM may be starved.
+ */
+/* Turn HDLCD default color red instead of black so
+ * that it's easy to see pixel clock data underruns
+ * (compared to other visual disruption)
+ */
+//#define HDLCD_RED_DEFAULT_COLOUR
+/* Add a counter in the IRQ handler to count buffer underruns
+ * and /proc/hdlcd_underrun to read the counter
+ */
+//#define HDLCD_COUNT_BUFFERUNDERRUNS
+/* Restrict height to 1x screen size
+ *
+ */
+//#define HDLCD_NO_VIRTUAL_SCREEN
+
+#ifdef CONFIG_ANDROID
+#define HDLCD_NO_VIRTUAL_SCREEN
+#endif
+
+struct hdlcd_device {
+ struct fb_info fb;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *base;
+ int irq;
+ struct completion vsync_completion;
+ unsigned char *edid;
+};
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 939533da93a7..2045a128c987 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -304,6 +304,8 @@ struct clk_div_table {
* of this register, and mask of divider bits are in higher 16-bit of this
* register. While setting the divider bits, higher 16-bit should also be
* updated to indicate changing divider bits.
+ * CLK_DIVIDER_ROUND_CLOSEST - Makes the best calculated divider round to the
+ * closest integer instead of rounding up.
*/
struct clk_divider {
struct clk_hw hw;
@@ -319,6 +321,7 @@ struct clk_divider {
#define CLK_DIVIDER_POWER_OF_TWO BIT(1)
#define CLK_DIVIDER_ALLOW_ZERO BIT(2)
#define CLK_DIVIDER_HIWORD_MASK BIT(3)
+#define CLK_DIVIDER_ROUND_CLOSEST BIT(4)
extern const struct clk_ops clk_divider_ops;
struct clk *clk_register_divider(struct device *dev, const char *name,
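[note] A quick worked example for the new flag (numbers are illustrative): with a 100 MHz parent and a 42 MHz request the exact divider is about 2.38; the default behaviour rounds up to 3 (33.3 MHz), while CLK_DIVIDER_ROUND_CLOSEST picks 2 (50 MHz), which is closer to the request. A hedged registration sketch, where "my_pll", reg and lock are driver-local assumptions:

    /* sketch only -- parent name, reg and lock are assumptions */
    clk = clk_register_divider(dev, "uart_div", "my_pll", 0,
                               reg, /* shift */ 0, /* width */ 4,
                               CLK_DIVIDER_ROUND_CLOSEST, &lock);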
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
new file mode 100644
index 000000000000..44c1597a738a
--- /dev/null
+++ b/include/linux/coresight.h
@@ -0,0 +1,251 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_CORESIGHT_H
+#define _LINUX_CORESIGHT_H
+
+#include <linux/device.h>
+
+/* Peripheral id registers (0xFD0-0xFEC) */
+#define CORESIGHT_PERIPHIDR4 0xfd0
+#define CORESIGHT_PERIPHIDR5 0xfd4
+#define CORESIGHT_PERIPHIDR6 0xfd8
+#define CORESIGHT_PERIPHIDR7 0xfdC
+#define CORESIGHT_PERIPHIDR0 0xfe0
+#define CORESIGHT_PERIPHIDR1 0xfe4
+#define CORESIGHT_PERIPHIDR2 0xfe8
+#define CORESIGHT_PERIPHIDR3 0xfeC
+/* Component id registers (0xFF0-0xFFC) */
+#define CORESIGHT_COMPIDR0 0xff0
+#define CORESIGHT_COMPIDR1 0xff4
+#define CORESIGHT_COMPIDR2 0xff8
+#define CORESIGHT_COMPIDR3 0xffC
+
+#define ETM_ARCH_V3_3 0x23
+#define ETM_ARCH_V3_5 0x25
+#define PFT_ARCH_V1_0 0x30
+#define PFT_ARCH_V1_1 0x31
+
+#define CORESIGHT_UNLOCK 0xc5acce55
+
+extern struct bus_type coresight_bustype;
+
+enum coresight_dev_type {
+ CORESIGHT_DEV_TYPE_NONE,
+ CORESIGHT_DEV_TYPE_SINK,
+ CORESIGHT_DEV_TYPE_LINK,
+ CORESIGHT_DEV_TYPE_LINKSINK,
+ CORESIGHT_DEV_TYPE_SOURCE,
+};
+
+enum coresight_dev_subtype_sink {
+ CORESIGHT_DEV_SUBTYPE_SINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_SINK_PORT,
+ CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
+};
+
+enum coresight_dev_subtype_link {
+ CORESIGHT_DEV_SUBTYPE_LINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_LINK_MERG,
+ CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
+ CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
+};
+
+enum coresight_dev_subtype_source {
+ CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+};
+
+/**
+ * struct coresight_dev_subtype - further characterisation of a type
+ * @sink_subtype: type of sink this component is, as defined
+ by @coresight_dev_subtype_sink.
+ * @link_subtype: type of link this component is, as defined
+ by @coresight_dev_subtype_link.
+ * @source_subtype: type of source this component is, as defined
+ by @coresight_dev_subtype_source.
+ */
+struct coresight_dev_subtype {
+ enum coresight_dev_subtype_sink sink_subtype;
+ enum coresight_dev_subtype_link link_subtype;
+ enum coresight_dev_subtype_source source_subtype;
+};
+
+/**
+ * struct coresight_platform_data - data harvested from the DT specification
+ * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs.
+ * @name: name of the component as shown under sysfs.
+ * @nr_inport: number of input ports for this component.
+ * @outports: list of remote endpoint port numbers.
+ * @child_names:name of all child components connected to this device.
+ * @child_ports:child component port number the current component is
+ connected to.
+ * @nr_outport: number of output ports for this component.
+ * @clk: The clock this component is associated to.
+ */
+struct coresight_platform_data {
+ int cpu;
+ const char *name;
+ int nr_inport;
+ int *outports;
+ const char **child_names;
+ int *child_ports;
+ int nr_outport;
+ struct clk *clk;
+};
+
+/**
+ * struct coresight_desc - description of a component required from drivers
+ * @type: as defined by @coresight_dev_type.
+ * @subtype: as defined by @coresight_dev_subtype.
+ * @ops: generic operations for this component, as defined
+ by @coresight_ops.
+ * @pdata: platform data collected from DT.
+ * @dev: The device entity associated to this component.
+ * @groups :operations specific to this component. These will end up
+ in the component's sysfs sub-directory.
+ */
+struct coresight_desc {
+ enum coresight_dev_type type;
+ struct coresight_dev_subtype subtype;
+ const struct coresight_ops *ops;
+ struct coresight_platform_data *pdata;
+ struct device *dev;
+ const struct attribute_group **groups;
+};
+
+/**
+ * struct coresight_connection - representation of a single connection
+ * @ref_count: keeps count of a port's references.
+ * @outport: a connection's output port number.
+ * @child_name: remote component's name.
+ * @child_port: remote component's port number @output is connected to.
+ * @child_dev: a @coresight_device representation of the component
+ connected to @outport.
+ */
+struct coresight_connection {
+ int outport;
+ const char *child_name;
+ int child_port;
+ struct coresight_device *child_dev;
+};
+
+/**
+ * struct coresight_device - representation of a device as used by the framework
+ * @nr_inport: number of input port associated to this component.
+ * @nr_outport: number of output port associated to this component.
+ * @type: as defined by @coresight_dev_type.
+ * @subtype: as defined by @coresight_dev_subtype.
+ * @ops: generic operations for this component, as defined
+ by @coresight_ops.
+ * @dev: The device entity associated to this component.
+ * @refcnt: keep track of what is in use.
+ * @path_link: link of current component into the path being enabled.
+ * @orphan: true if the component has connections that haven't been linked.
+ * @enable: 'true' if component is currently part of an active path.
+ * @activated: 'true' only if a _sink_ has been activated. A sink can be
+ activated but not yet enabled. Enabling for a _sink_
+ happens when a source has been selected for it.
+ */
+struct coresight_device {
+ struct coresight_connection *conns;
+ int nr_inport;
+ int nr_outport;
+ enum coresight_dev_type type;
+ struct coresight_dev_subtype subtype;
+ const struct coresight_ops *ops;
+ struct device dev;
+ atomic_t *refcnt;
+ struct list_head path_link;
+ bool orphan;
+ bool enable; /* true only if configured as part of a path */
+ bool activated; /* true only if a sink is part of a path */
+};
+
+#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+
+#define source_ops(csdev) csdev->ops->source_ops
+#define sink_ops(csdev) csdev->ops->sink_ops
+#define link_ops(csdev) csdev->ops->link_ops
+
+/**
+ * struct coresight_ops_sink - basic operations for a sink
+ * Operations available for sinks
+ * @enable: enables the sink.
+ * @disable: disables the sink.
+ */
+struct coresight_ops_sink {
+ int (*enable)(struct coresight_device *csdev);
+ void (*disable)(struct coresight_device *csdev);
+};
+
+/**
+ * struct coresight_ops_link - basic operations for a link
+ * Operations available for links.
+ * @enable: enables flow between iport and oport.
+ * @disable: disables flow between iport and oport.
+ */
+struct coresight_ops_link {
+ int (*enable)(struct coresight_device *csdev, int iport, int oport);
+ void (*disable)(struct coresight_device *csdev, int iport, int oport);
+};
+
+/**
+ * struct coresight_ops_source - basic operations for a source
+ * Operations available for sources.
+ * @trace_id: returns the value of the component's trace ID as known
+ to the HW.
+ * @enable: enables tracing from a source.
+ * @disable: disables tracing for a source.
+ */
+struct coresight_ops_source {
+ int (*trace_id)(struct coresight_device *csdev);
+ int (*enable)(struct coresight_device *csdev);
+ void (*disable)(struct coresight_device *csdev);
+};
+
+struct coresight_ops {
+ const struct coresight_ops_sink *sink_ops;
+ const struct coresight_ops_link *link_ops;
+ const struct coresight_ops_source *source_ops;
+};
+
+#ifdef CONFIG_CORESIGHT
+extern struct coresight_device *
+coresight_register(struct coresight_desc *desc);
+extern void coresight_unregister(struct coresight_device *csdev);
+extern int coresight_enable(struct coresight_device *csdev);
+extern void coresight_disable(struct coresight_device *csdev);
+extern int coresight_timeout(void __iomem *addr, u32 offset,
+ int position, int value);
+#else
+static inline struct coresight_device *
+coresight_register(struct coresight_desc *desc) { return NULL; }
+static inline void coresight_unregister(struct coresight_device *csdev) {}
+static inline int
+coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
+static inline void coresight_disable(struct coresight_device *csdev) {}
+static inline int coresight_timeout(void __iomem *addr, u32 offset,
+ int position, int value) { return 1; }
+#endif
+
+#ifdef CONFIG_OF
+extern struct coresight_platform_data *of_get_coresight_platform_data(
+ struct device *dev, struct device_node *node);
+#else
+static inline struct coresight_platform_data *of_get_coresight_platform_data(
+ struct device *dev, struct device_node *node) { return NULL; }
+#endif
+
+#endif
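[note] A hedged sketch of how a driver is expected to feed this framework (names prefixed my_ are assumptions, and the exact error-return convention of coresight_register() is not spelled out by this header):

    static const struct coresight_ops my_sink_cs_ops;    /* .sink_ops filled in elsewhere */

    static int my_sink_setup(struct device *dev, struct coresight_platform_data *pdata)
    {
            struct coresight_desc desc = {
                    .type    = CORESIGHT_DEV_TYPE_SINK,
                    .subtype = { .sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER },
                    .ops     = &my_sink_cs_ops,
                    .pdata   = pdata,
                    .dev     = dev,
            };
            struct coresight_device *csdev = coresight_register(&desc);

            if (IS_ERR_OR_NULL(csdev))
                    return csdev ? PTR_ERR(csdev) : -ENOMEM;
            return 0;
    }

A trace session is then built by activating a sink and calling coresight_enable() on a source, which walks the coresight_connection links from source to sink.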
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
new file mode 100644
index 000000000000..c4d4eb8ac9fe
--- /dev/null
+++ b/include/linux/cpufeature.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_CPUFEATURE_H
+#define __LINUX_CPUFEATURE_H
+
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+
+#include <linux/mod_devicetable.h>
+#include <asm/cpufeature.h>
+
+/*
+ * Macros imported from <asm/cpufeature.h>:
+ * - cpu_feature(x) ordinal value of feature called 'x'
+ * - cpu_have_feature(u32 n) whether feature #n is available
+ * - MAX_CPU_FEATURES upper bound for feature ordinal values
+ * Optional:
+ * - CPU_FEATURE_TYPEFMT format string fragment for printing the cpu type
+ * - CPU_FEATURE_TYPEVAL set of values matching the format string above
+ */
+
+#ifndef CPU_FEATURE_TYPEFMT
+#define CPU_FEATURE_TYPEFMT "%s"
+#endif
+
+#ifndef CPU_FEATURE_TYPEVAL
+#define CPU_FEATURE_TYPEVAL ELF_PLATFORM
+#endif
+
+/*
+ * Use module_cpu_feature_match(feature, module_init_function) to
+ * declare that
+ * a) the module shall be probed upon discovery of CPU feature 'feature'
+ * (typically at boot time using udev)
+ * b) the module must not be loaded if CPU feature 'feature' is not present
+ * (not even by manual insmod).
+ *
+ * For a list of legal values for 'feature', please consult the file
+ * 'asm/cpufeature.h' of your favorite architecture.
+ */
+#define module_cpu_feature_match(x, __init) \
+static struct cpu_feature const cpu_feature_match_ ## x[] = \
+ { { .feature = cpu_feature(x) }, { } }; \
+MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
+ \
+static int cpu_feature_match_ ## x ## _init(void) \
+{ \
+ if (!cpu_have_feature(cpu_feature(x))) \
+ return -ENODEV; \
+ return __init(); \
+} \
+module_init(cpu_feature_match_ ## x ## _init)
+
+#endif
+#endif
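[note] A minimal usage example of the macro described in the comment above (the feature name AES is illustrative and arch-dependent):

    #include <linux/cpufeature.h>
    #include <linux/module.h>
    #include <linux/printk.h>

    static int __init my_mod_init(void)
    {
            pr_info("CPU feature present, loading\n");
            return 0;
    }
    module_cpu_feature_match(AES, my_mod_init);

udev can then auto-load the module on machines whose CPU advertises the feature, and manual insmod on other machines fails with -ENODEV.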
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 0a819e7a60c9..2bcd8d23053a 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -153,6 +153,102 @@ typedef struct {
u8 sets_to_zero;
} efi_time_cap_t;
+typedef struct {
+ efi_table_hdr_t hdr;
+ u32 raise_tpl;
+ u32 restore_tpl;
+ u32 allocate_pages;
+ u32 free_pages;
+ u32 get_memory_map;
+ u32 allocate_pool;
+ u32 free_pool;
+ u32 create_event;
+ u32 set_timer;
+ u32 wait_for_event;
+ u32 signal_event;
+ u32 close_event;
+ u32 check_event;
+ u32 install_protocol_interface;
+ u32 reinstall_protocol_interface;
+ u32 uninstall_protocol_interface;
+ u32 handle_protocol;
+ u32 __reserved;
+ u32 register_protocol_notify;
+ u32 locate_handle;
+ u32 locate_device_path;
+ u32 install_configuration_table;
+ u32 load_image;
+ u32 start_image;
+ u32 exit;
+ u32 unload_image;
+ u32 exit_boot_services;
+ u32 get_next_monotonic_count;
+ u32 stall;
+ u32 set_watchdog_timer;
+ u32 connect_controller;
+ u32 disconnect_controller;
+ u32 open_protocol;
+ u32 close_protocol;
+ u32 open_protocol_information;
+ u32 protocols_per_handle;
+ u32 locate_handle_buffer;
+ u32 locate_protocol;
+ u32 install_multiple_protocol_interfaces;
+ u32 uninstall_multiple_protocol_interfaces;
+ u32 calculate_crc32;
+ u32 copy_mem;
+ u32 set_mem;
+ u32 create_event_ex;
+} __packed efi_boot_services_32_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u64 raise_tpl;
+ u64 restore_tpl;
+ u64 allocate_pages;
+ u64 free_pages;
+ u64 get_memory_map;
+ u64 allocate_pool;
+ u64 free_pool;
+ u64 create_event;
+ u64 set_timer;
+ u64 wait_for_event;
+ u64 signal_event;
+ u64 close_event;
+ u64 check_event;
+ u64 install_protocol_interface;
+ u64 reinstall_protocol_interface;
+ u64 uninstall_protocol_interface;
+ u64 handle_protocol;
+ u64 __reserved;
+ u64 register_protocol_notify;
+ u64 locate_handle;
+ u64 locate_device_path;
+ u64 install_configuration_table;
+ u64 load_image;
+ u64 start_image;
+ u64 exit;
+ u64 unload_image;
+ u64 exit_boot_services;
+ u64 get_next_monotonic_count;
+ u64 stall;
+ u64 set_watchdog_timer;
+ u64 connect_controller;
+ u64 disconnect_controller;
+ u64 open_protocol;
+ u64 close_protocol;
+ u64 open_protocol_information;
+ u64 protocols_per_handle;
+ u64 locate_handle_buffer;
+ u64 locate_protocol;
+ u64 install_multiple_protocol_interfaces;
+ u64 uninstall_multiple_protocol_interfaces;
+ u64 calculate_crc32;
+ u64 copy_mem;
+ u64 set_mem;
+ u64 create_event_ex;
+} __packed efi_boot_services_64_t;
+
/*
* EFI Boot Services table
*/
@@ -231,6 +327,15 @@ typedef enum {
EfiPciIoAttributeOperationMaximum
} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION;
+typedef struct {
+ u32 read;
+ u32 write;
+} efi_pci_io_protocol_access_32_t;
+
+typedef struct {
+ u64 read;
+ u64 write;
+} efi_pci_io_protocol_access_64_t;
typedef struct {
void *read;
@@ -238,6 +343,46 @@ typedef struct {
} efi_pci_io_protocol_access_t;
typedef struct {
+ u32 poll_mem;
+ u32 poll_io;
+ efi_pci_io_protocol_access_32_t mem;
+ efi_pci_io_protocol_access_32_t io;
+ efi_pci_io_protocol_access_32_t pci;
+ u32 copy_mem;
+ u32 map;
+ u32 unmap;
+ u32 allocate_buffer;
+ u32 free_buffer;
+ u32 flush;
+ u32 get_location;
+ u32 attributes;
+ u32 get_bar_attributes;
+ u32 set_bar_attributes;
+ uint64_t romsize;
+ void *romimage;
+} efi_pci_io_protocol_32;
+
+typedef struct {
+ u64 poll_mem;
+ u64 poll_io;
+ efi_pci_io_protocol_access_64_t mem;
+ efi_pci_io_protocol_access_64_t io;
+ efi_pci_io_protocol_access_64_t pci;
+ u64 copy_mem;
+ u64 map;
+ u64 unmap;
+ u64 allocate_buffer;
+ u64 free_buffer;
+ u64 flush;
+ u64 get_location;
+ u64 attributes;
+ u64 get_bar_attributes;
+ u64 set_bar_attributes;
+ uint64_t romsize;
+ void *romimage;
+} efi_pci_io_protocol_64;
+
+typedef struct {
void *poll_mem;
void *poll_io;
efi_pci_io_protocol_access_t mem;
@@ -292,6 +437,42 @@ typedef struct {
typedef struct {
efi_table_hdr_t hdr;
+ u32 get_time;
+ u32 set_time;
+ u32 get_wakeup_time;
+ u32 set_wakeup_time;
+ u32 set_virtual_address_map;
+ u32 convert_pointer;
+ u32 get_variable;
+ u32 get_next_variable;
+ u32 set_variable;
+ u32 get_next_high_mono_count;
+ u32 reset_system;
+ u32 update_capsule;
+ u32 query_capsule_caps;
+ u32 query_variable_info;
+} efi_runtime_services_32_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u64 get_time;
+ u64 set_time;
+ u64 get_wakeup_time;
+ u64 set_wakeup_time;
+ u64 set_virtual_address_map;
+ u64 convert_pointer;
+ u64 get_variable;
+ u64 get_next_variable;
+ u64 set_variable;
+ u64 get_next_high_mono_count;
+ u64 reset_system;
+ u64 update_capsule;
+ u64 query_capsule_caps;
+ u64 query_variable_info;
+} efi_runtime_services_64_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
void *get_time;
void *set_time;
void *get_wakeup_time;
@@ -394,6 +575,9 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long si
#define EFI_FILE_SYSTEM_GUID \
EFI_GUID( 0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
+#define DEVICE_TREE_GUID \
+ EFI_GUID( 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 )
+
typedef struct {
efi_guid_t guid;
u64 table;
@@ -483,6 +667,46 @@ struct efi_memory_map {
unsigned long desc_size;
};
+struct efi_fdt_params {
+ u64 system_table;
+ u64 mmap;
+ u32 mmap_size;
+ u32 desc_size;
+ u32 desc_ver;
+};
+
+typedef struct {
+ u32 revision;
+ u32 parent_handle;
+ u32 system_table;
+ u32 device_handle;
+ u32 file_path;
+ u32 reserved;
+ u32 load_options_size;
+ u32 load_options;
+ u32 image_base;
+ __aligned_u64 image_size;
+ unsigned int image_code_type;
+ unsigned int image_data_type;
+ unsigned long unload;
+} efi_loaded_image_32_t;
+
+typedef struct {
+ u32 revision;
+ u64 parent_handle;
+ u64 system_table;
+ u64 device_handle;
+ u64 file_path;
+ u64 reserved;
+ u32 load_options_size;
+ u64 load_options;
+ u64 image_base;
+ __aligned_u64 image_size;
+ unsigned int image_code_type;
+ unsigned int image_data_type;
+ unsigned long unload;
+} efi_loaded_image_64_t;
+
typedef struct {
u32 revision;
void *parent_handle;
@@ -511,6 +735,34 @@ typedef struct {
efi_char16_t filename[1];
} efi_file_info_t;
+typedef struct {
+ u64 revision;
+ u32 open;
+ u32 close;
+ u32 delete;
+ u32 read;
+ u32 write;
+ u32 get_position;
+ u32 set_position;
+ u32 get_info;
+ u32 set_info;
+ u32 flush;
+} efi_file_handle_32_t;
+
+typedef struct {
+ u64 revision;
+ u64 open;
+ u64 close;
+ u64 delete;
+ u64 read;
+ u64 write;
+ u64 get_position;
+ u64 set_position;
+ u64 get_info;
+ u64 set_info;
+ u64 flush;
+} efi_file_handle_64_t;
+
typedef struct _efi_file_handle {
u64 revision;
efi_status_t (*open)(struct _efi_file_handle *,
@@ -573,6 +825,7 @@ extern struct efi {
efi_reset_system_t *reset_system;
efi_set_virtual_address_map_t *set_virtual_address_map;
struct efi_memory_map *memmap;
+ unsigned long flags;
} efi;
static inline int
@@ -619,8 +872,15 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource,
extern void efi_get_time(struct timespec *now);
extern int efi_set_rtc_mmss(const struct timespec *now);
extern void efi_reserve_boot_services(void);
+extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose);
extern struct efi_memory_map memmap;
+/* Iterate through an efi_memory_map */
+#define for_each_efi_memory_desc(m, md) \
+ for ((md) = (m)->map; \
+ (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
+ (md) = (void *)(md) + (m)->desc_size)
+
/**
* efi_range_is_wc - check the WC bit on an address range
* @start: starting kvirt address
@@ -659,18 +919,17 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_ARCH_1 6 /* First arch-specific bit */
#ifdef CONFIG_EFI
-# ifdef CONFIG_X86
-extern int efi_enabled(int facility);
-# else
-static inline int efi_enabled(int facility)
+/*
+ * Test whether the above EFI_* bits are enabled.
+ */
+static inline bool efi_enabled(int feature)
{
- return 1;
+ return test_bit(feature, &efi.flags) != 0;
}
-# endif
#else
-static inline int efi_enabled(int facility)
+static inline bool efi_enabled(int feature)
{
- return 0;
+ return false;
}
#endif
@@ -792,8 +1051,10 @@ struct efivars {
* and we use a page for reading/writing.
*/
+#define EFI_VAR_NAME_LEN 1024
+
struct efi_variable {
- efi_char16_t VariableName[1024/sizeof(efi_char16_t)];
+ efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
efi_guid_t VendorGuid;
unsigned long DataSize;
__u8 Data[1024];
@@ -809,6 +1070,17 @@ struct efivar_entry {
bool deleting;
};
+struct efi_simple_text_output_protocol_32 {
+ u32 reset;
+ u32 output_string;
+ u32 test_string;
+};
+
+struct efi_simple_text_output_protocol_64 {
+ u64 reset;
+ u64 output_string;
+ u64 test_string;
+};
struct efi_simple_text_output_protocol {
void *reset;
@@ -816,6 +1088,7 @@ struct efi_simple_text_output_protocol {
void *test_string;
};
+
extern struct list_head efivar_sysfs_list;
static inline void
@@ -864,7 +1137,7 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
struct list_head *head, bool remove);
-bool efivar_validate(struct efi_variable *var, u8 *data, unsigned long len);
+bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
extern struct work_struct efivar_work;
void efivar_run_worker(void);
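[note] A hedged sketch tying the new pieces together: walk the global memory map with the iterator added above, guarded by the bit-based efi_enabled() test (EFI_MEMMAP is one of the EFI_* feature bits defined earlier in this header; field names come from efi_memory_desc_t):

    static void __init dump_efi_memmap(void)
    {
            efi_memory_desc_t *md;

            if (!efi_enabled(EFI_MEMMAP))
                    return;

            for_each_efi_memory_desc(&memmap, md)
                    pr_info("type %u: phys %#llx, %llu pages\n",
                            md->type, md->phys_addr, md->num_pages);
    }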
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 2068dff8a2cc..92d0597b7bba 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -605,25 +605,27 @@ static inline void __ftrace_enabled_restore(int enabled)
#endif
}
-#ifndef HAVE_ARCH_CALLER_ADDR
+/* All archs should have this, but we define it for consistency */
+#ifndef ftrace_return_address0
+# define ftrace_return_address0 __builtin_return_address(0)
+#endif
+
+/* Archs may use other ways for ADDR1 and beyond */
+#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
-# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
-# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
-# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
-# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
-# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
+# define ftrace_return_address(n) __builtin_return_address(n)
# else
-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-# define CALLER_ADDR1 0UL
-# define CALLER_ADDR2 0UL
-# define CALLER_ADDR3 0UL
-# define CALLER_ADDR4 0UL
-# define CALLER_ADDR5 0UL
-# define CALLER_ADDR6 0UL
+# define ftrace_return_address(n) 0UL
# endif
-#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
+#endif
+
+#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
+#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
+#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
+#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
+#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
+#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
+#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
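[note] The point of the new indirection is that an architecture can supply its own unwinder for the deeper return addresses instead of relying on __builtin_return_address(n), which is unreliable past level 0 on some targets. A hedged sketch of the kind of override an arch's asm/ftrace.h could carry (return_address() is an assumed arch helper, not defined here):

    /* in <asm/ftrace.h> -- sketch only */
    #define ftrace_return_address(n)  return_address(n)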
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
new file mode 100644
index 000000000000..03a4ea37ba86
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
+#define __LINUX_IRQCHIP_ARM_GIC_V3_H
+
+#include <asm/sysreg.h>
+
+/*
+ * Distributor registers. We assume we're running non-secure, with ARE
+ * being set. Secure-only and non-ARE registers are not described.
+ */
+#define GICD_CTLR 0x0000
+#define GICD_TYPER 0x0004
+#define GICD_IIDR 0x0008
+#define GICD_STATUSR 0x0010
+#define GICD_SETSPI_NSR 0x0040
+#define GICD_CLRSPI_NSR 0x0048
+#define GICD_SETSPI_SR 0x0050
+#define GICD_CLRSPI_SR 0x0058
+#define GICD_SEIR 0x0068
+#define GICD_ISENABLER 0x0100
+#define GICD_ICENABLER 0x0180
+#define GICD_ISPENDR 0x0200
+#define GICD_ICPENDR 0x0280
+#define GICD_ISACTIVER 0x0300
+#define GICD_ICACTIVER 0x0380
+#define GICD_IPRIORITYR 0x0400
+#define GICD_ICFGR 0x0C00
+#define GICD_IROUTER 0x6000
+#define GICD_PIDR2 0xFFE8
+
+#define GICD_CTLR_RWP (1U << 31)
+#define GICD_CTLR_ARE_NS (1U << 4)
+#define GICD_CTLR_ENABLE_G1A (1U << 1)
+#define GICD_CTLR_ENABLE_G1 (1U << 0)
+
+#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
+#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
+
+#define GIC_PIDR2_ARCH_MASK 0xf0
+#define GIC_PIDR2_ARCH_GICv3 0x30
+#define GIC_PIDR2_ARCH_GICv4 0x40
+
+/*
+ * Re-Distributor registers, offsets from RD_base
+ */
+#define GICR_CTLR GICD_CTLR
+#define GICR_IIDR 0x0004
+#define GICR_TYPER 0x0008
+#define GICR_STATUSR GICD_STATUSR
+#define GICR_WAKER 0x0014
+#define GICR_SETLPIR 0x0040
+#define GICR_CLRLPIR 0x0048
+#define GICR_SEIR GICD_SEIR
+#define GICR_PROPBASER 0x0070
+#define GICR_PENDBASER 0x0078
+#define GICR_INVLPIR 0x00A0
+#define GICR_INVALLR 0x00B0
+#define GICR_SYNCR 0x00C0
+#define GICR_MOVLPIR 0x0100
+#define GICR_MOVALLR 0x0110
+#define GICR_PIDR2 GICD_PIDR2
+
+#define GICR_WAKER_ProcessorSleep (1U << 1)
+#define GICR_WAKER_ChildrenAsleep (1U << 2)
+
+/*
+ * Re-Distributor registers, offsets from SGI_base
+ */
+#define GICR_ISENABLER0 GICD_ISENABLER
+#define GICR_ICENABLER0 GICD_ICENABLER
+#define GICR_ISPENDR0 GICD_ISPENDR
+#define GICR_ICPENDR0 GICD_ICPENDR
+#define GICR_ISACTIVER0 GICD_ISACTIVER
+#define GICR_ICACTIVER0 GICD_ICACTIVER
+#define GICR_IPRIORITYR0 GICD_IPRIORITYR
+#define GICR_ICFGR0 GICD_ICFGR
+
+#define GICR_TYPER_VLPIS (1U << 1)
+#define GICR_TYPER_LAST (1U << 4)
+
+/*
+ * CPU interface registers
+ */
+#define ICC_CTLR_EL1_EOImode_drop_dir (0U << 1)
+#define ICC_CTLR_EL1_EOImode_drop (1U << 1)
+#define ICC_SRE_EL1_SRE (1U << 0)
+
+/*
+ * Hypervisor interface registers (SRE only)
+ */
+#define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1)
+
+#define ICH_LR_EOI (1UL << 41)
+#define ICH_LR_GROUP (1UL << 60)
+#define ICH_LR_STATE (3UL << 62)
+#define ICH_LR_PENDING_BIT (1UL << 62)
+#define ICH_LR_ACTIVE_BIT (1UL << 63)
+
+#define ICH_MISR_EOI (1 << 0)
+#define ICH_MISR_U (1 << 1)
+
+#define ICH_HCR_EN (1 << 0)
+#define ICH_HCR_UIE (1 << 1)
+
+#define ICH_VMCR_CTLR_SHIFT 0
+#define ICH_VMCR_CTLR_MASK (0x21f << ICH_VMCR_CTLR_SHIFT)
+#define ICH_VMCR_BPR1_SHIFT 18
+#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT)
+#define ICH_VMCR_BPR0_SHIFT 21
+#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT)
+#define ICH_VMCR_PMR_SHIFT 24
+#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
+
+#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
+#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
+#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
+#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
+#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
+#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
+#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
+
+#define ICC_IAR1_EL1_SPURIOUS 0x3ff
+
+#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
+
+#define ICC_SRE_EL2_SRE (1 << 0)
+#define ICC_SRE_EL2_ENABLE (1 << 3)
+
+/*
+ * System register definitions
+ */
+#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
+#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
+#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
+#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
+#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
+#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
+#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
+
+#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
+#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x)
+
+#define ICH_LR0_EL2 __LR0_EL2(0)
+#define ICH_LR1_EL2 __LR0_EL2(1)
+#define ICH_LR2_EL2 __LR0_EL2(2)
+#define ICH_LR3_EL2 __LR0_EL2(3)
+#define ICH_LR4_EL2 __LR0_EL2(4)
+#define ICH_LR5_EL2 __LR0_EL2(5)
+#define ICH_LR6_EL2 __LR0_EL2(6)
+#define ICH_LR7_EL2 __LR0_EL2(7)
+#define ICH_LR8_EL2 __LR8_EL2(0)
+#define ICH_LR9_EL2 __LR8_EL2(1)
+#define ICH_LR10_EL2 __LR8_EL2(2)
+#define ICH_LR11_EL2 __LR8_EL2(3)
+#define ICH_LR12_EL2 __LR8_EL2(4)
+#define ICH_LR13_EL2 __LR8_EL2(5)
+#define ICH_LR14_EL2 __LR8_EL2(6)
+#define ICH_LR15_EL2 __LR8_EL2(7)
+
+#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
+#define ICH_AP0R0_EL2 __AP0Rx_EL2(0)
+#define ICH_AP0R1_EL2 __AP0Rx_EL2(1)
+#define ICH_AP0R2_EL2 __AP0Rx_EL2(2)
+#define ICH_AP0R3_EL2 __AP0Rx_EL2(3)
+
+#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
+#define ICH_AP1R0_EL2 __AP1Rx_EL2(0)
+#define ICH_AP1R1_EL2 __AP1Rx_EL2(1)
+#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
+#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+
+static inline void gic_write_eoir(u64 irq)
+{
+ asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
+ isb();
+}
+
+#endif
+
+#endif
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 0ceb389dba6c..45e2d8c15bd2 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -21,6 +21,8 @@
#define GIC_CPU_ACTIVEPRIO 0xd0
#define GIC_CPU_IDENT 0xfc
+#define GICC_IAR_INT_ID_MASK 0x3ff
+
#define GIC_DIST_CTRL 0x000
#define GIC_DIST_CTR 0x004
#define GIC_DIST_IGROUP 0x080
@@ -93,6 +95,11 @@ int gic_get_cpu_id(unsigned int cpu);
void gic_migrate_target(unsigned int new_cpu_id);
unsigned long gic_get_sgir_physaddr(void);
+extern const struct irq_domain_ops *gic_routable_irq_domain_ops;
+static inline void __init register_routable_domain_ops
+ (const struct irq_domain_ops *ops)
+{
+ gic_routable_irq_domain_ops = ops;
+}
#endif /* __ASSEMBLY */
-
#endif
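[note] A hedged sketch of the intended caller of register_routable_domain_ops(): an SoC interrupt-crossbar driver hands its domain ops to the GIC core early in boot (my_routable_ops is an assumption):

    static const struct irq_domain_ops my_routable_ops = {
            /* .map / .unmap / .xlate implemented by the crossbar driver */
    };

    static int __init my_crossbar_init(void)
    {
            register_routable_domain_ops(&my_routable_ops);
            return 0;
    }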
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b8e9a43e501a..55d75a7cba73 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -138,8 +138,6 @@ static inline bool is_error_page(struct page *page)
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
-struct kvm;
-struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;
extern spinlock_t kvm_lock;
@@ -192,7 +190,7 @@ struct kvm_async_pf {
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
@@ -313,25 +311,6 @@ struct kvm_kernel_irq_routing_entry {
struct hlist_node link;
};
-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
-
-struct kvm_irq_routing_table {
- int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
- struct kvm_kernel_irq_routing_entry *rt_entries;
- u32 nr_rt_entries;
- /*
- * Array indexed by gsi. Each entry contains list of irq chips
- * the gsi is connected to.
- */
- struct hlist_head map[0];
-};
-
-#else
-
-struct kvm_irq_routing_table {};
-
-#endif
-
#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif
@@ -358,6 +337,7 @@ struct kvm {
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots *memslots;
struct srcu_struct srcu;
+ struct srcu_struct irq_srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
u32 bsp_vcpu_id;
#endif
@@ -388,11 +368,12 @@ struct kvm {
struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
/*
- * Update side is protected by irq_lock and,
- * if configured, irqfds.lock.
+ * Update side is protected by irq_lock.
*/
struct kvm_irq_routing_table __rcu *irq_routing;
struct hlist_head mask_notifier_list;
+#endif
+#ifdef CONFIG_HAVE_KVM_IRQFD
struct hlist_head irq_ack_notifier_list;
#endif
@@ -442,7 +423,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
@@ -531,6 +512,8 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
+unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
+ bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
@@ -589,7 +572,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
-int kvm_dev_ioctl_check_extension(long ext);
+int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
int kvm_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log, int *is_dirty);
@@ -627,6 +610,8 @@ void kvm_arch_exit(void);
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
+
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
@@ -635,8 +620,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
-int kvm_arch_hardware_enable(void *garbage);
-void kvm_arch_hardware_disable(void *garbage);
+int kvm_arch_hardware_enable(void);
+void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
@@ -739,6 +724,10 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
bool mask);
+int kvm_irq_map_gsi(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *entries, int gsi);
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
+
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
bool line_status);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
@@ -868,6 +857,13 @@ static inline hpa_t pfn_to_hpa(pfn_t pfn)
return (hpa_t)pfn << PAGE_SHIFT;
}
+static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
+{
+ unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+
+ return kvm_is_error_hva(hva);
+}
+
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
@@ -918,28 +914,27 @@ int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *entries,
unsigned nr,
unsigned flags);
-int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
- struct kvm_kernel_irq_routing_entry *e,
+int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);
-int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
-
#else
static inline void kvm_free_irq_routing(struct kvm *kvm) {}
#endif
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
+
#ifdef CONFIG_HAVE_KVM_EVENTFD
void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
+#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
-void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
+void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
@@ -961,10 +956,8 @@ static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
static inline void kvm_irqfd_release(struct kvm *kvm) {}
#ifdef CONFIG_HAVE_KVM_IRQCHIP
-static inline void kvm_irq_routing_update(struct kvm *kvm,
- struct kvm_irq_routing_table *irq_rt)
+static inline void kvm_irq_routing_update(struct kvm *kvm)
{
- rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif
@@ -1025,8 +1018,6 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
extern bool kvm_rebooting;
-struct kvm_device_ops;
-
struct kvm_device {
struct kvm_device_ops *ops;
struct kvm *kvm;
@@ -1059,11 +1050,11 @@ struct kvm_device_ops {
void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
+int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_vfio_ops;
-extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
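The new kvm_is_error_gpa() helper lets callers validate a guest physical address against the memslots before touching it. A minimal sketch of how arch code might use it, assuming a hypothetical demo_read_guest_word() helper; kvm_read_guest() is the existing guest-memory accessor:

#include <linux/kvm_host.h>

/* Hypothetical helper: refuse to read through a gpa no memslot backs. */
static int demo_read_guest_word(struct kvm *kvm, gpa_t gpa, u32 *val)
{
	if (kvm_is_error_gpa(kvm, gpa))
		return -EFAULT;

	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}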
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index b0bcce0ddc95..b606bb689a3e 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -17,6 +17,20 @@
#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__
+struct kvm;
+struct kvm_async_pf;
+struct kvm_device_ops;
+struct kvm_interrupt;
+struct kvm_irq_routing_table;
+struct kvm_memory_slot;
+struct kvm_one_reg;
+struct kvm_run;
+struct kvm_userspace_memory_region;
+struct kvm_vcpu;
+struct kvm_vcpu_init;
+
+enum kvm_mr_change;
+
#include <asm/types.h>
/*
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
new file mode 100644
index 000000000000..307d9cab2026
--- /dev/null
+++ b/include/linux/mailbox_client.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2013-2014 Linaro Ltd.
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MAILBOX_CLIENT_H
+#define __MAILBOX_CLIENT_H
+
+#include <linux/of.h>
+#include <linux/device.h>
+
+struct mbox_chan;
+
+/**
+ * struct mbox_client - User of a mailbox
+ * @dev: The client device
+ * @tx_block: If the mbox_send_message should block until data is
+ * transmitted.
+ * @tx_tout: Max block period in ms before TX is assumed to have failed
+ * @knows_txdone: If the client could run the TX state machine. Usually
+ * if the client receives some ACK packet for transmission.
+ * Unused if the controller already has TX_Done/RTR IRQ.
+ * @rx_callback: Atomic callback to provide client the data received
+ * @tx_done: Atomic callback to tell client of data transmission
+ */
+struct mbox_client {
+ struct device *dev;
+ bool tx_block;
+ unsigned long tx_tout;
+ bool knows_txdone;
+
+ void (*rx_callback)(struct mbox_client *cl, void *mssg);
+ void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
+};
+
+struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
+int mbox_send_message(struct mbox_chan *chan, void *mssg);
+void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */
+bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */
+void mbox_free_channel(struct mbox_chan *chan); /* may sleep */
+
+#endif /* __MAILBOX_CLIENT_H */
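A minimal sketch of the client side of this API, assuming a hypothetical device and channel 0 of its "mboxes" DT property; the demo_* names are illustrative only:

#include <linux/err.h>
#include <linux/mailbox_client.h>

/* Hypothetical receive hook; runs in atomic context. */
static void demo_rx(struct mbox_client *cl, void *mssg)
{
	/* consume the received packet here */
}

static int demo_send(struct device *dev, void *msg)
{
	struct mbox_client cl = {
		.dev = dev,
		.tx_block = true,
		.tx_tout = 500,		/* give up after 500 ms */
		.rx_callback = demo_rx,
	};
	struct mbox_chan *chan;
	int ret;

	chan = mbox_request_channel(&cl, 0);	/* first "mboxes" entry */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, msg);	/* blocks until TX is done */
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}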
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
new file mode 100644
index 000000000000..d4cf96f07cfc
--- /dev/null
+++ b/include/linux/mailbox_controller.h
@@ -0,0 +1,133 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MAILBOX_CONTROLLER_H
+#define __MAILBOX_CONTROLLER_H
+
+#include <linux/of.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/device.h>
+#include <linux/completion.h>
+
+struct mbox_chan;
+
+/**
+ * struct mbox_chan_ops - methods to control mailbox channels
+ * @send_data: The API asks the MBOX controller driver, in atomic
+ * context, to try to transmit a message on the bus. Returns 0 if
+ * data is accepted for transmission, -EBUSY while rejecting
+ * if the remote hasn't yet read the last data sent. Actual
+ * transmission of data is reported by the controller via
+ * mbox_chan_txdone (if it has some TX ACK irq). It must not
+ * sleep.
+ * @startup: Called when a client requests the chan. The controller
+ * could ask clients for additional parameters of communication
+ * to be provided via client's chan_data. This call may
+ * block. After this call the Controller must forward any
+ * data received on the chan by calling mbox_chan_received_data.
+ * The controller may do stuff that needs to sleep.
+ * @shutdown: Called when a client relinquishes control of a chan.
+ * This call may block too. The controller must not forward
+ * any received data anymore.
+ * The controller may do stuff that needs to sleep.
+ * @last_tx_done: If the controller sets 'txdone_poll', the API calls
+ * this to poll status of last TX. The controller must
+ * give priority to IRQ method over polling and never
+ * set both txdone_poll and txdone_irq. Only in polling
+ * mode 'send_data' is expected to return -EBUSY.
+ * The controller may do stuff that needs to sleep/block.
+ * Used only if txdone_poll:=true && txdone_irq:=false
+ * @peek_data: Atomic check for any received data. Return true if controller
+ * has some data to push to the client. False otherwise.
+ */
+struct mbox_chan_ops {
+ int (*send_data)(struct mbox_chan *chan, void *data);
+ int (*startup)(struct mbox_chan *chan);
+ void (*shutdown)(struct mbox_chan *chan);
+ bool (*last_tx_done)(struct mbox_chan *chan);
+ bool (*peek_data)(struct mbox_chan *chan);
+};
+
+/**
+ * struct mbox_controller - Controller of a class of communication channels
+ * @dev: Device backing this controller
+ * @ops: Operators that work on each communication chan
+ * @chans: Array of channels
+ * @num_chans: Number of channels in the 'chans' array.
+ * @txdone_irq: Indicates if the controller can report to API when
+ * the last transmitted data was read by the remote.
+ * Eg, if it has some TX ACK irq.
+ * @txdone_poll: If the controller can read but not report the TX
+ * done. Ex, some register shows the TX status but
+ * no interrupt rises. Ignored if 'txdone_irq' is set.
+ * @txpoll_period: If 'txdone_poll' is in effect, the API polls for
+ * last TX's status after this many milliseconds
+ * @of_xlate: Controller driver specific mapping of channel via DT
+ * @poll: API private. Used to poll for TXDONE on all channels.
+ * @node: API private. To hook into list of controllers.
+ */
+struct mbox_controller {
+ struct device *dev;
+ struct mbox_chan_ops *ops;
+ struct mbox_chan *chans;
+ int num_chans;
+ bool txdone_irq;
+ bool txdone_poll;
+ unsigned txpoll_period;
+ struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp);
+ /* Internal to API */
+ struct timer_list poll;
+ struct list_head node;
+};
+
+/*
+ * The length of circular buffer for queuing messages from a client.
+ * 'msg_count' tracks the number of buffered messages while 'msg_free'
+ * is the index where the next message would be buffered.
+ * We shouldn't need it too big because every transfer is interrupt
+ * triggered and if we have lots of data to transfer, the interrupt
+ * latencies are going to be the bottleneck, not the buffer length.
+ * Besides, mbox_send_message could be called from atomic context and
+ * the client could also queue another message from the notifier 'tx_done'
+ * of the last transfer done.
+ * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN"
+ * print, it needs to be taken from config option or somesuch.
+ */
+#define MBOX_TX_QUEUE_LEN 20
+
+/**
+ * struct mbox_chan - s/w representation of a communication chan
+ * @mbox: Pointer to the parent/provider of this channel
+ * @txdone_method: Way to detect TXDone chosen by the API
+ * @cl: Pointer to the current owner of this channel
+ * @tx_complete: Transmission completion
+ * @active_req: Currently active request hook
+ * @msg_count: No. of mssg currently queued
+ * @msg_free: Index of next available mssg slot
+ * @msg_data: Hook for data packet
+ * @lock: Serialise access to the channel
+ * @con_priv: Hook for controller driver to attach private data
+ */
+struct mbox_chan {
+ struct mbox_controller *mbox;
+ unsigned txdone_method;
+ struct mbox_client *cl;
+ struct completion tx_complete;
+ void *active_req;
+ unsigned msg_count, msg_free;
+ void *msg_data[MBOX_TX_QUEUE_LEN];
+ spinlock_t lock; /* Serialise access to the channel */
+ void *con_priv;
+};
+
+int mbox_controller_register(struct mbox_controller *mbox); /* can sleep */
+void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */
+void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */
+void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */
+
+#endif /* __MAILBOX_CONTROLLER_H */
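On the provider side, a controller driver fills a struct mbox_controller and registers it. A sketch assuming a two-channel doorbell with a TX-done interrupt; all demo_* identifiers are hypothetical:

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>

static int demo_send_data(struct mbox_chan *chan, void *data)
{
	/* write 'data' to the doorbell/FIFO registers here */
	return 0;	/* accepted; mbox_chan_txdone() is called from the ISR */
}

static int demo_startup(struct mbox_chan *chan) { return 0; }
static void demo_shutdown(struct mbox_chan *chan) { }

static struct mbox_chan_ops demo_chan_ops = {
	.send_data	= demo_send_data,
	.startup	= demo_startup,
	.shutdown	= demo_shutdown,
};

static struct mbox_chan demo_chans[2];
static struct mbox_controller demo_mbox;

static int demo_mbox_probe(struct platform_device *pdev)
{
	demo_mbox.dev = &pdev->dev;
	demo_mbox.ops = &demo_chan_ops;
	demo_mbox.chans = demo_chans;
	demo_mbox.num_chans = ARRAY_SIZE(demo_chans);
	demo_mbox.txdone_irq = true;	/* hardware raises a TX ACK irq */

	return mbox_controller_register(&demo_mbox);
}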
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 45e921401b06..f2ac87c613a5 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -564,6 +564,15 @@ struct x86_cpu_id {
#define X86_MODEL_ANY 0
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
+/*
+ * Generic table type for matching CPU features.
+ * @feature: the bit number of the feature (0 - 65535)
+ */
+
+struct cpu_feature {
+ __u16 feature;
+};
+
#define IPACK_ANY_FORMAT 0xff
#define IPACK_ANY_ID (~0)
struct ipack_device_id {
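struct cpu_feature is matched by the new linux/cpufeature.h helpers (see the diffstat). A hedged sketch of a module that autoloads on a hwcap, assuming an arm64-style CRC32 feature bit and the module_cpu_feature_match() convenience macro:

#include <linux/module.h>
#include <linux/cpufeature.h>

static int __init demo_crc_init(void)
{
	/* register the accelerated implementation here */
	return 0;
}

/* Emits the struct cpu_feature device table and the module_init hook. */
module_cpu_feature_match(CRC32, demo_crc_init);
MODULE_LICENSE("GPL");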
diff --git a/include/linux/of.h b/include/linux/of.h
index 3f8144dadaef..6f821dc62b8d 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -18,11 +18,12 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/errno.h>
-#include <linux/kref.h>
+#include <linux/kobject.h>
#include <linux/mod_devicetable.h>
#include <linux/spinlock.h>
#include <linux/topology.h>
#include <linux/notifier.h>
+#include <linux/list.h>
#include <asm/byteorder.h>
#include <asm/errno.h>
@@ -37,6 +38,7 @@ struct property {
struct property *next;
unsigned long _flags;
unsigned int unique_id;
+ struct bin_attribute attr;
};
#if defined(CONFIG_SPARC)
@@ -56,8 +58,7 @@ struct device_node {
struct device_node *sibling;
struct device_node *next; /* next device of same type */
struct device_node *allnext; /* next in list of all nodes */
- struct proc_dir_entry *pde; /* this node's proc directory */
- struct kref kref;
+ struct kobject kobj;
unsigned long _flags;
void *data;
#if defined(CONFIG_SPARC)
@@ -74,6 +75,31 @@ struct of_phandle_args {
uint32_t args[MAX_PHANDLE_ARGS];
};
+struct of_reconfig_data {
+ struct device_node *dn;
+ struct property *prop;
+ struct property *old_prop;
+};
+
+/* initialize a node */
+extern struct kobj_type of_node_ktype;
+static inline void of_node_init(struct device_node *node)
+{
+ kobject_init(&node->kobj, &of_node_ktype);
+}
+
+/* true when node is initialized */
+static inline int of_node_is_initialized(struct device_node *node)
+{
+ return node && node->kobj.state_initialized;
+}
+
+/* true when node is attached (i.e. present on sysfs) */
+static inline int of_node_is_attached(struct device_node *node)
+{
+ return node && node->kobj.state_in_sysfs;
+}
+
#ifdef CONFIG_OF_DYNAMIC
extern struct device_node *of_node_get(struct device_node *node);
extern void of_node_put(struct device_node *node);
@@ -109,11 +135,37 @@ static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
return test_bit(flag, &n->_flags);
}
+static inline int of_node_test_and_set_flag(struct device_node *n,
+ unsigned long flag)
+{
+ return test_and_set_bit(flag, &n->_flags);
+}
+
static inline void of_node_set_flag(struct device_node *n, unsigned long flag)
{
set_bit(flag, &n->_flags);
}
+static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
+{
+ clear_bit(flag, &n->_flags);
+}
+
+static inline int of_property_check_flag(struct property *p, unsigned long flag)
+{
+ return test_bit(flag, &p->_flags);
+}
+
+static inline void of_property_set_flag(struct property *p, unsigned long flag)
+{
+ set_bit(flag, &p->_flags);
+}
+
+static inline void of_property_clear_flag(struct property *p, unsigned long flag)
+{
+ clear_bit(flag, &p->_flags);
+}
+
extern struct device_node *of_find_all_nodes(struct device_node *prev);
/*
@@ -156,6 +208,8 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
/* flag descriptions */
#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
#define OF_DETACHED 2 /* node has been detached from the device tree */
+#define OF_POPULATED 3 /* device already created for the node */
+#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */
#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
@@ -167,6 +221,8 @@ static inline const char *of_node_full_name(const struct device_node *np)
return np ? np->full_name : "<no-node>";
}
+#define for_each_of_allnodes(dn) \
+ for (dn = of_allnodes; dn; dn = dn->allnext)
extern struct device_node *of_find_node_by_name(struct device_node *from,
const char *name);
extern struct device_node *of_find_node_by_type(struct device_node *from,
@@ -198,6 +254,8 @@ extern struct device_node *of_find_node_with_property(
extern struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp);
+extern int of_property_count_elems_of_size(const struct device_node *np,
+ const char *propname, int elem_size);
extern int of_property_read_u32_index(const struct device_node *np,
const char *propname,
u32 index, u32 *out_value);
@@ -265,15 +323,6 @@ extern int of_update_property(struct device_node *np, struct property *newprop);
#define OF_RECONFIG_REMOVE_PROPERTY 0x0004
#define OF_RECONFIG_UPDATE_PROPERTY 0x0005
-struct of_prop_reconfig {
- struct device_node *dn;
- struct property *prop;
-};
-
-extern int of_reconfig_notifier_register(struct notifier_block *);
-extern int of_reconfig_notifier_unregister(struct notifier_block *);
-extern int of_reconfig_notify(unsigned long, void *);
-
extern int of_attach_node(struct device_node *);
extern int of_detach_node(struct device_node *);
@@ -388,6 +437,12 @@ static inline struct device_node *of_find_compatible_node(
return NULL;
}
+static inline int of_property_count_elems_of_size(const struct device_node *np,
+ const char *propname, int elem_size)
+{
+ return -ENOSYS;
+}
+
static inline int of_property_read_u32_index(const struct device_node *np,
const char *propname, u32 index, u32 *out_value)
{
@@ -591,6 +646,74 @@ static inline int of_property_read_string_index(struct device_node *np,
return rc < 0 ? rc : 0;
}
+/**
+ * of_property_count_u8_elems - Count the number of u8 elements in a property
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u8 elements
+ * in it. Returns number of elements on success, -EINVAL if the property does
+ * not exist or its length does not match a multiple of u8 and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u8_elems(const struct device_node *np,
+ const char *propname)
+{
+ return of_property_count_elems_of_size(np, propname, sizeof(u8));
+}
+
+/**
+ * of_property_count_u16_elems - Count the number of u16 elements in a property
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u16 elements
+ * in it. Returns number of elements on success, -EINVAL if the property does
+ * not exist or its length does not match a multiple of u16 and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u16_elems(const struct device_node *np,
+ const char *propname)
+{
+ return of_property_count_elems_of_size(np, propname, sizeof(u16));
+}
+
+/**
+ * of_property_count_u32_elems - Count the number of u32 elements in a property
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u32 elements
+ * in it. Returns number of elements on success, -EINVAL if the property does
+ * not exist or its length does not match a multiple of u32 and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u32_elems(const struct device_node *np,
+ const char *propname)
+{
+ return of_property_count_elems_of_size(np, propname, sizeof(u32));
+}
+
+/**
+ * of_property_count_u64_elems - Count the number of u64 elements in a property
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u64 elements
+ * in it. Returns number of elements on success, -EINVAL if the property does
+ * not exist or its length does not match a multiple of u64 and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u64_elems(const struct device_node *np,
+ const char *propname)
+{
+ return of_property_count_elems_of_size(np, propname, sizeof(u64));
+}
+
/**
 * of_property_read_bool - Find a property
* @np: device node from which the property value is to be read.
@@ -689,14 +812,166 @@ static inline int of_get_available_child_count(const struct device_node *np)
return num;
}
-#if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE)
-extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
-extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
-extern void proc_device_tree_remove_prop(struct proc_dir_entry *pde,
- struct property *prop);
-extern void proc_device_tree_update_prop(struct proc_dir_entry *pde,
- struct property *newprop,
- struct property *oldprop);
+#ifdef CONFIG_OF
+#define _OF_DECLARE(table, name, compat, fn, fn_type) \
+ static const struct of_device_id __of_table_##name \
+ __used __section(__##table##_of_table) \
+ = { .compatible = compat, \
+ .data = (fn == (fn_type)NULL) ? fn : fn }
+#else
+#define _OF_DECLARE(table, name, compat, fn, fn_type) \
+ static const struct of_device_id __of_table_##name \
+ __attribute__((unused)) \
+ = { .compatible = compat, \
+ .data = (fn == (fn_type)NULL) ? fn : fn }
+#endif
+
+typedef int (*of_init_fn_2)(struct device_node *, struct device_node *);
+typedef void (*of_init_fn_1)(struct device_node *);
+
+#define OF_DECLARE_1(table, name, compat, fn) \
+ _OF_DECLARE(table, name, compat, fn, of_init_fn_1)
+#define OF_DECLARE_2(table, name, compat, fn) \
+ _OF_DECLARE(table, name, compat, fn, of_init_fn_2)
+
+/**
+ * struct of_changeset_entry - Holds a changeset entry
+ *
+ * @node: list_head for the log list
+ * @action: notifier action
+ * @np: pointer to the device node affected
+ * @prop: pointer to the property affected
+ * @old_prop: hold a pointer to the original property
+ *
+ * Every modification of the device tree during a changeset
+ * is held in a list of of_changeset_entry structures.
+ * That way we can recover from a partial application, or we can
+ * revert the changeset
+ */
+struct of_changeset_entry {
+ struct list_head node;
+ unsigned long action;
+ struct device_node *np;
+ struct property *prop;
+ struct property *old_prop;
+};
+
+/**
+ * struct of_changeset - changeset tracker structure
+ *
+ * @entries: list_head for the changeset entries
+ *
+ * changesets are a convenient way to apply bulk changes to the
+ * live tree. In case of an error, changes are rolled-back.
+ * changesets live on after initial application, and if not
+ * destroyed after use, they can be reverted in one single call.
+ */
+struct of_changeset {
+ struct list_head entries;
+};
+
+enum of_reconfig_change {
+ OF_RECONFIG_NO_CHANGE = 0,
+ OF_RECONFIG_CHANGE_ADD,
+ OF_RECONFIG_CHANGE_REMOVE,
+};
+
+#ifdef CONFIG_OF_DYNAMIC
+extern int of_reconfig_notifier_register(struct notifier_block *);
+extern int of_reconfig_notifier_unregister(struct notifier_block *);
+extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd);
+extern int of_reconfig_get_state_change(unsigned long action,
+ struct of_reconfig_data *arg);
+
+extern void of_changeset_init(struct of_changeset *ocs);
+extern void of_changeset_destroy(struct of_changeset *ocs);
+extern int of_changeset_apply(struct of_changeset *ocs);
+extern int of_changeset_revert(struct of_changeset *ocs);
+extern int of_changeset_action(struct of_changeset *ocs,
+ unsigned long action, struct device_node *np,
+ struct property *prop);
+
+static inline int of_changeset_attach_node(struct of_changeset *ocs,
+ struct device_node *np)
+{
+ return of_changeset_action(ocs, OF_RECONFIG_ATTACH_NODE, np, NULL);
+}
+
+static inline int of_changeset_detach_node(struct of_changeset *ocs,
+ struct device_node *np)
+{
+ return of_changeset_action(ocs, OF_RECONFIG_DETACH_NODE, np, NULL);
+}
+
+static inline int of_changeset_add_property(struct of_changeset *ocs,
+ struct device_node *np, struct property *prop)
+{
+ return of_changeset_action(ocs, OF_RECONFIG_ADD_PROPERTY, np, prop);
+}
+
+static inline int of_changeset_remove_property(struct of_changeset *ocs,
+ struct device_node *np, struct property *prop)
+{
+ return of_changeset_action(ocs, OF_RECONFIG_REMOVE_PROPERTY, np, prop);
+}
+
+static inline int of_changeset_update_property(struct of_changeset *ocs,
+ struct device_node *np, struct property *prop)
+{
+ return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop);
+}
+#else /* CONFIG_OF_DYNAMIC */
+static inline int of_reconfig_notifier_register(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_notify(unsigned long action,
+ struct of_reconfig_data *arg)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_get_state_change(unsigned long action,
+ struct of_reconfig_data *arg)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF_DYNAMIC */
+
+/* CONFIG_OF_RESOLVE api */
+extern int of_resolve_phandles(struct device_node *tree);
+
+/**
+ * Overlay support
+ */
+
+#ifdef CONFIG_OF_OVERLAY
+
+/* ID based overlays; the API for external users */
+int of_overlay_create(struct device_node *tree);
+int of_overlay_destroy(int id);
+int of_overlay_destroy_all(void);
+
+#else
+
+static inline int of_overlay_create(struct device_node *tree)
+{
+ return -ENOTSUPP;
+}
+
+static inline int of_overlay_destroy(int id)
+{
+ return -ENOTSUPP;
+}
+
+static inline int of_overlay_destroy_all(void)
+{
+ return -ENOTSUPP;
+}
+
#endif
#endif /* _LINUX_OF_H */
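The of_property_count_*_elems() helpers make it easy to size a buffer before reading a property. A small sketch using the u32 variant together with the existing of_property_read_u32_array(); demo_read_u32_list() is hypothetical:

#include <linux/of.h>
#include <linux/slab.h>

static u32 *demo_read_u32_list(struct device_node *np, const char *prop,
			       int *count)
{
	int n = of_property_count_u32_elems(np, prop);
	u32 *vals;

	if (n <= 0)
		return NULL;		/* missing, empty or badly sized */

	vals = kcalloc(n, sizeof(*vals), GFP_KERNEL);
	if (!vals)
		return NULL;

	if (of_property_read_u32_array(np, prop, vals, n)) {
		kfree(vals);
		return NULL;
	}

	*count = n;
	return vals;
}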
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 5f6ed6b182b8..906ca7681756 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -40,7 +40,6 @@ extern u64 of_translate_dma_address(struct device_node *dev,
#ifdef CONFIG_OF_ADDRESS
extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
-extern bool of_can_translate_address(struct device_node *dev);
extern int of_address_to_resource(struct device_node *dev, int index,
struct resource *r);
extern struct device_node *of_find_matching_node_by_address(
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 2b77058a7335..5f5b83461a81 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -66,7 +66,7 @@ extern char *of_fdt_get_string(struct boot_param_header *blob, u32 offset);
extern void *of_fdt_get_property(struct boot_param_header *blob,
unsigned long node,
const char *name,
- unsigned long *size);
+ int *size);
extern int of_fdt_is_compatible(struct boot_param_header *blob,
unsigned long node,
const char *compat);
@@ -81,26 +81,25 @@ extern int __initdata dt_root_size_cells;
extern struct boot_param_header *initial_boot_params;
/* For scanning the flat device-tree at boot time */
-extern char *find_flat_dt_string(u32 offset);
extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
int depth, void *data),
void *data);
-extern void *of_get_flat_dt_prop(unsigned long node, const char *name,
- unsigned long *size);
+extern const void *of_get_flat_dt_prop(unsigned long node, const char *name,
+ int *size);
extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
extern int of_flat_dt_match(unsigned long node, const char *const *matches);
extern unsigned long of_get_flat_dt_root(void);
-extern int of_scan_flat_dt_by_path(const char *path,
- int (*it)(unsigned long node, const char *name, int depth, void *data),
- void *data);
extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
int depth, void *data);
extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data);
+extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
+extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
+ bool no_map);
extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align);
-extern u64 dt_mem_next_cell(int s, __be32 **cellp);
+extern u64 dt_mem_next_cell(int s, const __be32 **cellp);
/* Early flat tree scan hooks */
extern int early_init_dt_scan_root(unsigned long node, const char *uname,
@@ -118,6 +117,7 @@ extern void unflatten_and_copy_device_tree(void);
extern void early_init_devtree(void *);
extern void early_get_first_memblock_info(void *, phys_addr_t *);
#else /* CONFIG_OF_FLATTREE */
+static inline void early_init_fdt_scan_reserved_mem(void) {}
static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
static inline void unflatten_device_tree(void) {}
static inline void unflatten_and_copy_device_tree(void) {}
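With of_get_flat_dt_prop() now returning const data, an early flat-tree scan callback looks like the sketch below; the "vendor,magic" property and demo_scan() are purely illustrative:

#include <linux/of_fdt.h>

static int __init demo_scan(unsigned long node, const char *uname,
			    int depth, void *data)
{
	int len;
	const void *prop;

	if (depth != 0)
		return 0;		/* only look at the root node */

	prop = of_get_flat_dt_prop(node, "vendor,magic", &len);
	if (prop)
		*(int *)data = len;

	return prop != NULL;		/* non-zero stops of_scan_flat_dt() */
}

/* Caller, early in boot:  of_scan_flat_dt(demo_scan, &len); */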
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
new file mode 100644
index 000000000000..56e0507a0d58
--- /dev/null
+++ b/include/linux/of_graph.h
@@ -0,0 +1,65 @@
+/*
+ * OF graph binding parsing helpers
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Copyright (C) 2012 Renesas Electronics Corp.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_OF_GRAPH_H
+#define __LINUX_OF_GRAPH_H
+
+/**
+ * struct of_endpoint - the OF graph endpoint data structure
+ * @port: identifier (value of reg property) of a port this endpoint belongs to
+ * @id: identifier (value of reg property) of this endpoint
+ * @local_node: pointer to device_node of this endpoint
+ */
+struct of_endpoint {
+ unsigned int port;
+ unsigned int id;
+ const struct device_node *local_node;
+};
+
+#ifdef CONFIG_OF
+int of_graph_parse_endpoint(const struct device_node *node,
+ struct of_endpoint *endpoint);
+struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
+ struct device_node *previous);
+struct device_node *of_graph_get_remote_port_parent(
+ const struct device_node *node);
+struct device_node *of_graph_get_remote_port(const struct device_node *node);
+#else
+
+static inline int of_graph_parse_endpoint(const struct device_node *node,
+ struct of_endpoint *endpoint)
+{
+ return -ENOSYS;
+}
+static inline struct device_node *of_graph_get_next_endpoint(
+ const struct device_node *parent,
+ struct device_node *previous)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_graph_get_remote_port_parent(
+ const struct device_node *node)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_graph_get_remote_port(
+ const struct device_node *node)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_OF */
+
+#endif /* __LINUX_OF_GRAPH_H */
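A typical consumer walks every endpoint under its own node and resolves the device at the other end of each link; a sketch assuming a hypothetical demo_walk_endpoints():

#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/printk.h>

static void demo_walk_endpoints(struct device_node *dev_node)
{
	struct device_node *ep = NULL;

	while ((ep = of_graph_get_next_endpoint(dev_node, ep))) {
		struct of_endpoint endpoint;
		struct device_node *remote;

		if (of_graph_parse_endpoint(ep, &endpoint))
			continue;

		remote = of_graph_get_remote_port_parent(ep);
		pr_info("port %u endpoint %u -> %s\n", endpoint.port,
			endpoint.id, of_node_full_name(remote));
		of_node_put(remote);
	}
}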
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 05cb4a928252..9142922871a7 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -72,6 +72,7 @@ extern int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
struct device *parent);
+extern void of_platform_depopulate(struct device *parent);
#else
static inline int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
@@ -80,6 +81,13 @@ static inline int of_platform_populate(struct device_node *root,
{
return -ENODEV;
}
+static inline void of_platform_depopulate(struct device *parent) { }
+#endif
+
+#ifdef CONFIG_OF_DYNAMIC
+extern void of_platform_register_reconfig_notifier(void);
+#else
+static inline void of_platform_register_reconfig_notifier(void) { }
#endif
#endif /* _LINUX_OF_PLATFORM_H */
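of_platform_depopulate() is the inverse of of_platform_populate(); a sketch of the usual pairing in a driver's probe/remove path (demo_* names are hypothetical):

#include <linux/of_platform.h>

static int demo_bus_probe(struct platform_device *pdev)
{
	/* create platform devices for all child nodes */
	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
}

static int demo_bus_remove(struct platform_device *pdev)
{
	/* tear down everything probe created */
	of_platform_depopulate(&pdev->dev);
	return 0;
}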
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
new file mode 100644
index 000000000000..9b1fbb7f29fc
--- /dev/null
+++ b/include/linux/of_reserved_mem.h
@@ -0,0 +1,53 @@
+#ifndef __OF_RESERVED_MEM_H
+#define __OF_RESERVED_MEM_H
+
+struct device;
+struct of_phandle_args;
+struct reserved_mem_ops;
+
+struct reserved_mem {
+ const char *name;
+ unsigned long fdt_node;
+ const struct reserved_mem_ops *ops;
+ phys_addr_t base;
+ phys_addr_t size;
+ void *priv;
+};
+
+struct reserved_mem_ops {
+ void (*device_init)(struct reserved_mem *rmem,
+ struct device *dev);
+ void (*device_release)(struct reserved_mem *rmem,
+ struct device *dev);
+};
+
+typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem,
+ unsigned long node, const char *uname);
+
+#ifdef CONFIG_OF_RESERVED_MEM
+void fdt_init_reserved_mem(void);
+void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
+ phys_addr_t base, phys_addr_t size);
+
+#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
+ static const struct of_device_id __reservedmem_of_table_##name \
+ __used __section(__reservedmem_of_table) \
+ = { .compatible = compat, \
+ .data = (init == (reservedmem_of_init_fn)NULL) ? \
+ init : init }
+
+#else
+static inline void fdt_init_reserved_mem(void) { }
+static inline void fdt_reserved_mem_save_node(unsigned long node,
+ const char *uname, phys_addr_t base, phys_addr_t size) { }
+
+#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
+ static const struct of_device_id __reservedmem_of_table_##name \
+ __attribute__((unused)) \
+ = { .compatible = compat, \
+ .data = (init == (reservedmem_of_init_fn)NULL) ? \
+ init : init }
+
+#endif
+
+#endif /* __OF_RESERVED_MEM_H */
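RESERVEDMEM_OF_DECLARE() registers an early init hook that runs for each matching /reserved-memory child node; a sketch with a made-up "vendor,demo-pool" compatible:

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/of_reserved_mem.h>

static int __init demo_rmem_init(struct reserved_mem *rmem,
				 unsigned long node, const char *uname)
{
	pr_info("reserved region %s: base %pa size %pa\n",
		uname, &rmem->base, &rmem->size);
	return 0;
}
RESERVEDMEM_OF_DECLARE(demo, "vendor,demo-pool", demo_rmem_init);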
diff --git a/include/linux/mailbox.h b/include/linux/pl320-ipc.h
index 5161f63ec1c8..5161f63ec1c8 100644
--- a/include/linux/mailbox.h
+++ b/include/linux/pl320-ipc.h
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 8c6583a53a06..ea05bc207c93 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -596,6 +596,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
*/
struct dev_pm_domain {
struct dev_pm_ops ops;
+ void (*detach)(struct device *dev, bool power_off);
};
/*
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 7c1d252b20c0..66cfa1a39d81 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -310,4 +310,56 @@ static inline void pm_genpd_syscore_poweron(struct device *dev)
pm_genpd_syscore_switch(dev, false);
}
+/* OF PM domain providers */
+struct of_device_id;
+
+struct genpd_onecell_data {
+ struct generic_pm_domain **domains;
+ unsigned int num_domains;
+};
+
+typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
+ void *data);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
+ void *data);
+void of_genpd_del_provider(struct device_node *np);
+
+struct generic_pm_domain *__of_genpd_xlate_simple(
+ struct of_phandle_args *genpdspec,
+ void *data);
+struct generic_pm_domain *__of_genpd_xlate_onecell(
+ struct of_phandle_args *genpdspec,
+ void *data);
+
+int genpd_dev_pm_attach(struct device *dev);
+#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
+static inline int __of_genpd_add_provider(struct device_node *np,
+ genpd_xlate_t xlate, void *data)
+{
+ return 0;
+}
+static inline void of_genpd_del_provider(struct device_node *np) {}
+
+#define __of_genpd_xlate_simple NULL
+#define __of_genpd_xlate_onecell NULL
+
+static inline int genpd_dev_pm_attach(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
+
+static inline int of_genpd_add_provider_simple(struct device_node *np,
+ struct generic_pm_domain *genpd)
+{
+ return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd);
+}
+static inline int of_genpd_add_provider_onecell(struct device_node *np,
+ struct genpd_onecell_data *data)
+{
+ return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data);
+}
+
#endif /* _LINUX_PM_DOMAIN_H */
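A PM domain driver can now expose its domain(s) to DT consumers; a minimal single-domain sketch using of_genpd_add_provider_simple() (demo_pd and demo_pd_setup() are hypothetical):

#include <linux/of.h>
#include <linux/pm_domain.h>

static struct generic_pm_domain demo_pd = {
	.name = "demo-pd",
};

static int demo_pd_setup(struct device_node *np)
{
	/* start the domain powered on, default governor */
	pm_genpd_init(&demo_pd, NULL, false);

	return of_genpd_add_provider_simple(np, &demo_pd);
}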
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index f729be981da0..7a15b5b24c0b 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -285,6 +285,22 @@ static inline int uart_poll_timeout(struct uart_port *port)
/*
* Console helpers.
*/
+struct earlycon_device {
+ struct console *con;
+ struct uart_port port;
+ char options[16]; /* e.g., 115200n8 */
+ unsigned int baud;
+};
+int setup_earlycon(char *buf, const char *match,
+ int (*setup)(struct earlycon_device *, const char *));
+
+#define EARLYCON_DECLARE(name, func) \
+static int __init name ## _setup_earlycon(char *buf) \
+{ \
+ return setup_earlycon(buf, __stringify(name), func); \
+} \
+early_param("earlycon", name ## _setup_earlycon);
+
struct uart_port *uart_get_console(struct uart_port *ports, int nr,
struct console *c);
void uart_parse_options(char *options, int *baud, int *parity, int *bits,
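EARLYCON_DECLARE() hooks a setup function to "earlycon=<name>,..." on the command line. A hedged sketch of a polling early console named "demo": the register access is left out, and reading the earlycon_device back via con->data follows the existing 8250 earlycon pattern.

#include <linux/init.h>
#include <linux/console.h>
#include <linux/serial_core.h>

static void demo_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	/* poll dev->port.membase registers and emit the n bytes of s here */
	(void)dev;
}

static int __init demo_early_console_setup(struct earlycon_device *device,
					   const char *opt)
{
	device->con->write = demo_early_write;
	return 0;
}
EARLYCON_DECLARE(demo, demo_early_console_setup);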
diff --git a/include/linux/usb/xhci_pdriver.h b/include/linux/usb/xhci_pdriver.h
new file mode 100644
index 000000000000..376654b5b0f7
--- /dev/null
+++ b/include/linux/usb/xhci_pdriver.h
@@ -0,0 +1,27 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __USB_CORE_XHCI_PDRIVER_H
+#define __USB_CORE_XHCI_PDRIVER_H
+
+/**
+ * struct usb_xhci_pdata - platform_data for generic xhci platform driver
+ *
+ * @usb3_lpm_capable: determines if this xhci platform supports USB3
+ * LPM capability
+ *
+ */
+struct usb_xhci_pdata {
+ unsigned usb3_lpm_capable:1;
+};
+
+#endif /* __USB_CORE_XHCI_PDRIVER_H */
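The generic xhci platform driver picks this up as ordinary platform data; a board-code sketch ("xhci-hcd" is the generic driver's device name, everything else hypothetical):

#include <linux/platform_device.h>
#include <linux/usb/xhci_pdriver.h>

static struct usb_xhci_pdata demo_xhci_pdata = {
	.usb3_lpm_capable = 1,
};

static struct platform_device demo_xhci_dev = {
	.name = "xhci-hcd",
	.id = -1,
	.dev = {
		.platform_data = &demo_xhci_pdata,
	},
};

/* registered from board init:  platform_device_register(&demo_xhci_dev); */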
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 131a0bda7aec..908925ace776 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -37,7 +37,7 @@ TRACE_EVENT(kvm_userspace_exit,
__entry->errno < 0 ? -__entry->errno : __entry->reason)
);
-#if defined(CONFIG_HAVE_KVM_IRQCHIP)
+#if defined(CONFIG_HAVE_KVM_IRQFD)
TRACE_EVENT(kvm_set_irq,
TP_PROTO(unsigned int gsi, int level, int irq_source_id),
TP_ARGS(gsi, level, irq_source_id),
@@ -57,7 +57,7 @@ TRACE_EVENT(kvm_set_irq,
TP_printk("gsi %u level %d source %d",
__entry->gsi, __entry->level, __entry->irq_source_id)
);
-#endif
+#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
#if defined(__KVM_HAVE_IOAPIC)
#define kvm_deliver_mode \
@@ -124,7 +124,7 @@ TRACE_EVENT(kvm_msi_set_irq,
#endif /* defined(__KVM_HAVE_IOAPIC) */
-#if defined(CONFIG_HAVE_KVM_IRQCHIP)
+#if defined(CONFIG_HAVE_KVM_IRQFD)
TRACE_EVENT(kvm_ack_irq,
TP_PROTO(unsigned int irqchip, unsigned int pin),
@@ -149,7 +149,7 @@ TRACE_EVENT(kvm_ack_irq,
#endif
);
-#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
+#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 3ce25b5d75a9..3da5dfed2629 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -316,6 +316,7 @@ header-y += ppp-ioctl.h
header-y += ppp_defs.h
header-y += pps.h
header-y += prctl.h
+header-y += psci.h
header-y += ptp_clock.h
header-y += ptrace.h
header-y += qnx4_fs.h
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 932d7f2637d6..cbc65ee211a4 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -171,6 +171,7 @@ struct kvm_pit_config {
#define KVM_EXIT_WATCHDOG 21
#define KVM_EXIT_S390_TSCH 22
#define KVM_EXIT_EPR 23
+#define KVM_EXIT_SYSTEM_EVENT 24
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -301,6 +302,13 @@ struct kvm_run {
struct {
__u32 epr;
} epr;
+ /* KVM_EXIT_SYSTEM_EVENT */
+ struct {
+#define KVM_SYSTEM_EVENT_SHUTDOWN 1
+#define KVM_SYSTEM_EVENT_RESET 2
+ __u32 type;
+ __u64 flags;
+ } system_event;
/* Fix the size of the union. */
char padding[256];
};
@@ -391,8 +399,9 @@ struct kvm_vapic_addr {
__u64 vapic_addr;
};
-/* for KVM_SET_MPSTATE */
+/* for KVM_SET_MP_STATE */
+/* not all states are valid on all architectures */
#define KVM_MP_STATE_RUNNABLE 0
#define KVM_MP_STATE_UNINITIALIZED 1
#define KVM_MP_STATE_INIT_RECEIVED 2
@@ -573,9 +582,7 @@ struct kvm_ppc_smmu_info {
#endif
/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
-#ifdef __KVM_HAVE_USER_NMI
#define KVM_CAP_USER_NMI 22
-#endif
#ifdef __KVM_HAVE_GUEST_DEBUG
#define KVM_CAP_SET_GUEST_DEBUG 23
#endif
@@ -657,9 +664,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_PPC_GET_SMMU_INFO 78
#define KVM_CAP_S390_COW 79
#define KVM_CAP_PPC_ALLOC_HTAB 80
-#ifdef __KVM_HAVE_READONLY_MEM
#define KVM_CAP_READONLY_MEM 81
-#endif
#define KVM_CAP_IRQFD_RESAMPLE 82
#define KVM_CAP_PPC_BOOKE_WATCHDOG 83
#define KVM_CAP_PPC_HTAB_FD 84
@@ -675,6 +680,8 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_SPAPR_MULTITCE 94
#define KVM_CAP_EXT_EMUL_CPUID 95
#define KVM_CAP_HYPERV_TIME 96
+#define KVM_CAP_ARM_PSCI_0_2 102
+#define KVM_CAP_CHECK_EXTENSION_VM 105
#ifdef KVM_CAP_IRQ_ROUTING
@@ -847,14 +854,25 @@ struct kvm_device_attr {
__u64 addr; /* userspace address of attr data */
};
-#define KVM_DEV_TYPE_FSL_MPIC_20 1
-#define KVM_DEV_TYPE_FSL_MPIC_42 2
-#define KVM_DEV_TYPE_XICS 3
-#define KVM_DEV_TYPE_VFIO 4
#define KVM_DEV_VFIO_GROUP 1
#define KVM_DEV_VFIO_GROUP_ADD 1
#define KVM_DEV_VFIO_GROUP_DEL 2
-#define KVM_DEV_TYPE_ARM_VGIC_V2 5
+
+enum kvm_device_type {
+ KVM_DEV_TYPE_FSL_MPIC_20 = 1,
+#define KVM_DEV_TYPE_FSL_MPIC_20 KVM_DEV_TYPE_FSL_MPIC_20
+ KVM_DEV_TYPE_FSL_MPIC_42,
+#define KVM_DEV_TYPE_FSL_MPIC_42 KVM_DEV_TYPE_FSL_MPIC_42
+ KVM_DEV_TYPE_XICS,
+#define KVM_DEV_TYPE_XICS KVM_DEV_TYPE_XICS
+ KVM_DEV_TYPE_VFIO,
+#define KVM_DEV_TYPE_VFIO KVM_DEV_TYPE_VFIO
+ KVM_DEV_TYPE_ARM_VGIC_V2,
+#define KVM_DEV_TYPE_ARM_VGIC_V2 KVM_DEV_TYPE_ARM_VGIC_V2
+ KVM_DEV_TYPE_FLIC,
+#define KVM_DEV_TYPE_FLIC KVM_DEV_TYPE_FLIC
+ KVM_DEV_TYPE_MAX,
+};
/*
* ioctls for VM fds
@@ -992,7 +1010,7 @@ struct kvm_s390_ucas_mapping {
#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97)
#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state)
#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
-/* Available with KVM_CAP_NMI */
+/* Available with KVM_CAP_USER_NMI */
#define KVM_NMI _IO(KVMIO, 0x9a)
/* Available with KVM_CAP_SET_GUEST_DEBUG */
#define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug)
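With KVM_CAP_CHECK_EXTENSION_VM, the KVM_CHECK_EXTENSION ioctl may also be issued on a VM fd, so per-VM capabilities such as KVM_CAP_ARM_PSCI_0_2 can be queried directly. A userspace sketch with error handling trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int has_psci = 0;

	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_CHECK_EXTENSION_VM) > 0)
		has_psci = ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PSCI_0_2);

	printf("PSCI 0.2 for this VM: %s\n", has_psci > 0 ? "yes" : "no");
	return 0;
}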
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
new file mode 100644
index 000000000000..310d83e0a91b
--- /dev/null
+++ b/include/uapi/linux/psci.h
@@ -0,0 +1,90 @@
+/*
+ * ARM Power State and Coordination Interface (PSCI) header
+ *
+ * This header holds common PSCI defines and macros shared
+ * by: ARM kernel, ARM64 kernel, KVM ARM/ARM64 and user space.
+ *
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Anup Patel <anup.patel@linaro.org>
+ */
+
+#ifndef _UAPI_LINUX_PSCI_H
+#define _UAPI_LINUX_PSCI_H
+
+/*
+ * PSCI v0.1 interface
+ *
+ * The PSCI v0.1 function numbers are implementation defined.
+ *
+ * Only PSCI return values such as: SUCCESS, NOT_SUPPORTED,
+ * INVALID_PARAMS, and DENIED defined below are applicable
+ * to PSCI v0.1.
+ */
+
+/* PSCI v0.2 interface */
+#define PSCI_0_2_FN_BASE 0x84000000
+#define PSCI_0_2_FN(n) (PSCI_0_2_FN_BASE + (n))
+#define PSCI_0_2_64BIT 0x40000000
+#define PSCI_0_2_FN64_BASE \
+ (PSCI_0_2_FN_BASE + PSCI_0_2_64BIT)
+#define PSCI_0_2_FN64(n) (PSCI_0_2_FN64_BASE + (n))
+
+#define PSCI_0_2_FN_PSCI_VERSION PSCI_0_2_FN(0)
+#define PSCI_0_2_FN_CPU_SUSPEND PSCI_0_2_FN(1)
+#define PSCI_0_2_FN_CPU_OFF PSCI_0_2_FN(2)
+#define PSCI_0_2_FN_CPU_ON PSCI_0_2_FN(3)
+#define PSCI_0_2_FN_AFFINITY_INFO PSCI_0_2_FN(4)
+#define PSCI_0_2_FN_MIGRATE PSCI_0_2_FN(5)
+#define PSCI_0_2_FN_MIGRATE_INFO_TYPE PSCI_0_2_FN(6)
+#define PSCI_0_2_FN_MIGRATE_INFO_UP_CPU PSCI_0_2_FN(7)
+#define PSCI_0_2_FN_SYSTEM_OFF PSCI_0_2_FN(8)
+#define PSCI_0_2_FN_SYSTEM_RESET PSCI_0_2_FN(9)
+
+#define PSCI_0_2_FN64_CPU_SUSPEND PSCI_0_2_FN64(1)
+#define PSCI_0_2_FN64_CPU_ON PSCI_0_2_FN64(3)
+#define PSCI_0_2_FN64_AFFINITY_INFO PSCI_0_2_FN64(4)
+#define PSCI_0_2_FN64_MIGRATE PSCI_0_2_FN64(5)
+#define PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU PSCI_0_2_FN64(7)
+
+/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
+#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
+#define PSCI_0_2_POWER_STATE_ID_SHIFT 0
+#define PSCI_0_2_POWER_STATE_TYPE_SHIFT 16
+#define PSCI_0_2_POWER_STATE_TYPE_MASK \
+ (0x1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
+#define PSCI_0_2_POWER_STATE_AFFL_SHIFT 24
+#define PSCI_0_2_POWER_STATE_AFFL_MASK \
+ (0x3 << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
+
+/* PSCI v0.2 affinity level state returned by AFFINITY_INFO */
+#define PSCI_0_2_AFFINITY_LEVEL_ON 0
+#define PSCI_0_2_AFFINITY_LEVEL_OFF 1
+#define PSCI_0_2_AFFINITY_LEVEL_ON_PENDING 2
+
+/* PSCI v0.2 multicore support in Trusted OS returned by MIGRATE_INFO_TYPE */
+#define PSCI_0_2_TOS_UP_MIGRATE 0
+#define PSCI_0_2_TOS_UP_NO_MIGRATE 1
+#define PSCI_0_2_TOS_MP 2
+
+/* PSCI version decoding (independent of PSCI version) */
+#define PSCI_VERSION_MAJOR_SHIFT 16
+#define PSCI_VERSION_MINOR_MASK \
+ ((1U << PSCI_VERSION_MAJOR_SHIFT) - 1)
+#define PSCI_VERSION_MAJOR_MASK ~PSCI_VERSION_MINOR_MASK
+#define PSCI_VERSION_MAJOR(ver) \
+ (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
+#define PSCI_VERSION_MINOR(ver) \
+ ((ver) & PSCI_VERSION_MINOR_MASK)
+
+/* PSCI return values (inclusive of all PSCI versions) */
+#define PSCI_RET_SUCCESS 0
+#define PSCI_RET_NOT_SUPPORTED -1
+#define PSCI_RET_INVALID_PARAMS -2
+#define PSCI_RET_DENIED -3
+#define PSCI_RET_ALREADY_ON -4
+#define PSCI_RET_ON_PENDING -5
+#define PSCI_RET_INTERNAL_FAILURE -6
+#define PSCI_RET_NOT_PRESENT -7
+#define PSCI_RET_DISABLED -8
+
+#endif /* _UAPI_LINUX_PSCI_H */
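The version macros split the value returned by PSCI_0_2_FN_PSCI_VERSION into major and minor halves; a trivial userspace check (the sample value is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <linux/psci.h>

int main(void)
{
	uint32_t ver = 0x00000002;	/* e.g. what a PSCI 0.2 firmware returns */

	printf("PSCI v%u.%u\n",
	       (unsigned)PSCI_VERSION_MAJOR(ver),
	       (unsigned)PSCI_VERSION_MINOR(ver));
	return 0;
}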