author    Andrey Konovalov <andrey.konovalov@linaro.org>  2013-05-25 20:00:11 +0400
committer Andrey Konovalov <andrey.konovalov@linaro.org>  2013-05-25 20:00:11 +0400
commit    c2bdb2bc296e6f8fbed5a9f0f12035efbe5cfdee (patch)
tree      2de3d683ffe641e2ce55c90778dcf688919cbdcb
parent    c8d72f1d768f03b6e1e3d2995d4dfadb4f21c067 (diff)
PINNED manifest and solutions for lsk-20130525.1
Signed-off-by: Andrey Konovalov <andrey.konovalov@linaro.org>
-rw-r--r--  manifest.pin                                            5
-rw-r--r--  solutions/f39f86eaa6e8771669e537972f36862a/conflict1   40
-rw-r--r--  solutions/f39f86eaa6e8771669e537972f36862a/conflict2  435
-rw-r--r--  solutions/f39f86eaa6e8771669e537972f36862a/conflict3  179
-rw-r--r--  solutions/f39f86eaa6e8771669e537972f36862a/solution1   30
-rw-r--r--  solutions/f39f86eaa6e8771669e537972f36862a/solution2  285
-rw-r--r--  solutions/f39f86eaa6e8771669e537972f36862a/solution3   65
7 files changed, 1037 insertions, 2 deletions
diff --git a/manifest.pin b/manifest.pin
index 5095d68..5ab47b3 100644
--- a/manifest.pin
+++ b/manifest.pin
@@ -1,12 +1,13 @@
# Autogenerated PINNED manifest for linux-linaro-lsk
-# Date: 20130522-17:23:19
+# Date: 20130525-19:43:18
#
# topics
#
topic lsk-vexpress tixy/lsk-3.9-vexpress PIN b068b1e7d514d8806623ece2fc76aacf523f487d merge
topic big-LITTLE-MP-upstream b_L_mp/big-LITTLE-MP-master-v16 PIN e695239d5f0a553e28a1ff5f346fcc3fafab2b2f merge
topic iks nico/iks PIN 2be12549c900303c418fcbb7044979e190fd4e7e merge
-topic linux-3.x.y linux_stable/linux-3.9.y PIN 4bb08696fab71294c8f1c134a21be9159f82ba08 merge
+topic lsk-vexpress-iks ynk/lsk-3.9-vexpress-iks PIN 82d931619b0843d88ff2dea3f50b0b00b4d0bc3d merge
+topic linux-3.x.y linux_stable/linux-3.9.y PIN 0bfd8ffeff9dda08c69381d65c72e0aa58706ef6 merge
topic core-configs configs/config-core-3.9 PIN d2e7da5149679b501cdb4b8449bf15e7ae1c0bdb merge
topic linaro-builddeb-tweaks ynk/linaro-builddeb-tweaks PIN c54d0ac144b10d010c587e88e3f189f03acf319a merge
#
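
For reference, each "topic" line in the pinned manifest above follows the layout "topic <name> <remote/branch> PIN <sha1> <strategy>". Below is a minimal sketch, not part of the Linaro build tooling, of how the pinned revisions could be extracted from such a file; the function name and the assumption that the fields are whitespace-separated are illustrative only.

# Minimal sketch (assumption, not Linaro tooling): parse PINNED "topic"
# lines of the form "topic <name> <remote/branch> PIN <sha1> <strategy>".
def parse_pinned_topics(text):
    topics = {}
    for line in text.splitlines():
        fields = line.split()
        # Skip comments, blank lines and anything that is not a pinned topic.
        if len(fields) == 6 and fields[0] == "topic" and fields[3] == "PIN":
            topics[fields[1]] = {
                "branch": fields[2],
                "sha": fields[4],
                "strategy": fields[5],
            }
    return topics

# Example with one line taken verbatim from the manifest above:
print(parse_pinned_topics(
    "topic iks nico/iks PIN 2be12549c900303c418fcbb7044979e190fd4e7e merge"))
# -> {'iks': {'branch': 'nico/iks',
#             'sha': '2be12549c900303c418fcbb7044979e190fd4e7e',
#             'strategy': 'merge'}}
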
diff --git a/solutions/f39f86eaa6e8771669e537972f36862a/conflict1 b/solutions/f39f86eaa6e8771669e537972f36862a/conflict1
new file mode 100644
index 0000000..e727444
--- /dev/null
+++ b/solutions/f39f86eaa6e8771669e537972f36862a/conflict1
@@ -0,0 +1,40 @@
+diff --cc arch/arm/include/asm/pmu.h
+index 0cd7824,a7eaad3..0000000
+--- a/arch/arm/include/asm/pmu.h
++++ b/arch/arm/include/asm/pmu.h
+@@@ -71,6 -73,21 +73,24 @@@ struct cpupmu_regs
+ u32 pmxevtcnt[8];
+ };
+
+++<<<<<<< HEAD
+++=======
++ struct arm_cpu_pmu {
++ bool valid;
++ bool active;
++
++ u32 mpidr;
++ int irq;
++
++ struct perf_event *hw_events[ARMPMU_MAX_HWEVENTS];
++ unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
++ struct pmu_hw_events cpu_hw_events;
++ struct cpupmu_regs cpu_pmu_regs;
++
++ void *logical_state;
++ };
++
+++>>>>>>> tracking-lsk-vexpress-iks
+ struct arm_pmu {
+ struct pmu pmu;
+ cpumask_t active_irqs;
+@@@ -93,6 -110,7 +113,10 @@@
+ int (*map_event)(struct perf_event *event);
+ void (*save_regs)(struct arm_pmu *, struct cpupmu_regs *);
+ void (*restore_regs)(struct arm_pmu *, struct cpupmu_regs *);
+++<<<<<<< HEAD
+++=======
++ void (*cpu_init)(struct arm_pmu *, struct arm_cpu_pmu *);
+++>>>>>>> tracking-lsk-vexpress-iks
+ int num_events;
+ atomic_t active_events;
+ struct mutex reserve_mutex;
diff --git a/solutions/f39f86eaa6e8771669e537972f36862a/conflict2 b/solutions/f39f86eaa6e8771669e537972f36862a/conflict2
new file mode 100644
index 0000000..af93b75
--- /dev/null
+++ b/solutions/f39f86eaa6e8771669e537972f36862a/conflict2
@@ -0,0 +1,435 @@
+diff --cc arch/arm/kernel/perf_event_cpu.c
+index 0b48a38,b3ae24f..0000000
+--- a/arch/arm/kernel/perf_event_cpu.c
++++ b/arch/arm/kernel/perf_event_cpu.c
+@@@ -19,6 -19,7 +19,10 @@@
+ #define pr_fmt(fmt) "CPU PMU: " fmt
+
+ #include <linux/bitmap.h>
+++<<<<<<< HEAD
+++=======
++ #include <linux/cpumask.h>
+++>>>>>>> tracking-lsk-vexpress-iks
+ #include <linux/cpu_pm.h>
+ #include <linux/export.h>
+ #include <linux/kernel.h>
+@@@ -30,23 -34,33 +37,44 @@@
+ #include <asm/cputype.h>
+ #include <asm/irq_regs.h>
+ #include <asm/pmu.h>
++ #include <asm/smp_plat.h>
++ #include <asm/topology.h>
+
+++<<<<<<< HEAD
+ +/* Set at runtime when we know what CPU type we are. */
+ +static DEFINE_PER_CPU(struct arm_pmu *, cpu_pmu);
+++=======
++ static LIST_HEAD(cpu_pmus_list);
+++>>>>>>> tracking-lsk-vexpress-iks
+
+- static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
+- static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
+- static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
++ #define cpu_for_each_pmu(pmu, cpu_pmu, cpu) \
++ for_each_pmu(pmu, &cpu_pmus_list) \
++ if (((cpu_pmu) = per_cpu_ptr((pmu)->cpu_pmus, cpu))->valid)
++
++ static struct arm_pmu *__cpu_find_any_pmu(unsigned int cpu)
++ {
++ struct arm_pmu *pmu;
++ struct arm_cpu_pmu *cpu_pmu;
++
++ cpu_for_each_pmu(pmu, cpu_pmu, cpu)
++ return pmu;
++
++ return NULL;
++ }
+
+ +static DEFINE_PER_CPU(struct cpupmu_regs, cpu_pmu_regs);
+ +
+ /*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
+ const char *perf_pmu_name(void)
+ {
+++<<<<<<< HEAD
+ + struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
+++=======
++ struct arm_pmu *pmu = __cpu_find_any_pmu(0);
+++>>>>>>> tracking-lsk-vexpress-iks
+ if (!pmu)
+ return NULL;
+
+@@@ -56,7 -70,7 +84,11 @@@ EXPORT_SYMBOL_GPL(perf_pmu_name)
+
+ int perf_num_counters(void)
+ {
+++<<<<<<< HEAD
+ + struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
+++=======
++ struct arm_pmu *pmu = __cpu_find_any_pmu(0);
+++>>>>>>> tracking-lsk-vexpress-iks
+
+ if (!pmu)
+ return 0;
+@@@ -70,48 -84,66 +102,90 @@@ EXPORT_SYMBOL_GPL(perf_num_counters)
+ #include "perf_event_v6.c"
+ #include "perf_event_v7.c"
+
+- static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
++ static struct pmu_hw_events *cpu_pmu_get_cpu_events(struct arm_pmu *pmu)
+ {
+- return &__get_cpu_var(cpu_hw_events);
++ return &this_cpu_ptr(pmu->cpu_pmus)->cpu_hw_events;
+ }
+
+- static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
++ static int find_logical_cpu(u32 mpidr)
+ {
+++<<<<<<< HEAD
+ + int i, irq, irqs;
+ + struct platform_device *pmu_device = cpu_pmu->plat_device;
+ + int cpu = -1;
+++=======
++ int cpu = bL_switcher_get_logical_index(mpidr);
+++>>>>>>> tracking-lsk-vexpress-iks
+
+- irqs = min(pmu_device->num_resources, num_possible_cpus());
++ if (cpu != -EUNATCH)
++ return cpu;
+
+++<<<<<<< HEAD
+ + for (i = 0; i < irqs; ++i) {
+ + cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
+ + if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
+++=======
++ return get_logical_index(mpidr);
++ }
++
++ static void cpu_pmu_free_irq(struct arm_pmu *pmu)
++ {
++ int i;
++ int cpu;
++ struct arm_cpu_pmu *cpu_pmu;
++
++ for_each_possible_cpu(i) {
++ if (!(cpu_pmu = per_cpu_ptr(pmu->cpu_pmus, i)))
+++>>>>>>> tracking-lsk-vexpress-iks
+ continue;
+- irq = platform_get_irq(pmu_device, i);
+- if (irq >= 0)
+- free_irq(irq, cpu_pmu);
++
++ if (cpu_pmu->mpidr == -1)
++ continue;
++
++ cpu = find_logical_cpu(cpu_pmu->mpidr);
++ if (cpu < 0)
++ continue;
++
++ if (!cpumask_test_and_clear_cpu(cpu, &pmu->active_irqs))
++ continue;
++ if (cpu_pmu->irq >= 0)
++ free_irq(cpu_pmu->irq, pmu);
+ }
+ }
+
+- static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
++ static int cpu_pmu_request_irq(struct arm_pmu *pmu, irq_handler_t handler)
+ {
+ int i, err, irq, irqs;
+++<<<<<<< HEAD
+ + struct platform_device *pmu_device = cpu_pmu->plat_device;
+ + int cpu = -1;
+++=======
++ int cpu;
++ struct arm_cpu_pmu *cpu_pmu;
+++>>>>>>> tracking-lsk-vexpress-iks
+
+- if (!pmu_device)
+- return -ENODEV;
++ irqs = 0;
++ for_each_possible_cpu(i)
++ if (per_cpu_ptr(pmu->cpu_pmus, i))
++ ++irqs;
+
+- irqs = min(pmu_device->num_resources, num_possible_cpus());
+ if (irqs < 1) {
+ pr_err("no irqs for PMUs defined\n");
+ return -ENODEV;
+ }
+
+++<<<<<<< HEAD
+ + for (i = 0; i < irqs; ++i) {
+ + err = 0;
+ + cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
+ + irq = platform_get_irq(pmu_device, i);
+++=======
++ for_each_possible_cpu(i) {
++ if (!(cpu_pmu = per_cpu_ptr(pmu->cpu_pmus, i)))
++ continue;
++
++ irq = cpu_pmu->irq;
+++>>>>>>> tracking-lsk-vexpress-iks
+ if (irq < 0)
+ continue;
+
+@@@ -134,29 -173,36 +215,52 @@@
+ return err;
+ }
+
+++<<<<<<< HEAD
+ + cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
+++=======
++ cpumask_set_cpu(cpu, &pmu->active_irqs);
+++>>>>>>> tracking-lsk-vexpress-iks
+ }
+
+ return 0;
+ }
+
+- static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
++ static void cpu_pmu_init(struct arm_pmu *pmu)
+ {
+ int cpu;
+++<<<<<<< HEAD
+ + for_each_cpu_mask(cpu, cpu_pmu->valid_cpus) {
+ + struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+ + events->events = per_cpu(hw_events, cpu);
+ + events->used_mask = per_cpu(used_mask, cpu);
+++=======
++ for_each_cpu_mask(cpu, pmu->valid_cpus) {
++ struct arm_cpu_pmu *cpu_pmu = per_cpu_ptr(pmu->cpu_pmus, cpu);
++ struct pmu_hw_events *events = &cpu_pmu->cpu_hw_events;
++
++ events->events = cpu_pmu->hw_events;
++ events->used_mask = cpu_pmu->used_mask;
+++>>>>>>> tracking-lsk-vexpress-iks
+ raw_spin_lock_init(&events->pmu_lock);
++
++ if (pmu->cpu_init)
++ pmu->cpu_init(pmu, cpu_pmu);
++
++ cpu_pmu->valid = true;
+ }
+
+- cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
+- cpu_pmu->request_irq = cpu_pmu_request_irq;
+- cpu_pmu->free_irq = cpu_pmu_free_irq;
++ pmu->get_hw_events = cpu_pmu_get_cpu_events;
++ pmu->request_irq = cpu_pmu_request_irq;
++ pmu->free_irq = cpu_pmu_free_irq;
+
+ /* Ensure the PMU has sane values out of reset. */
+++<<<<<<< HEAD
+ + if (cpu_pmu->reset)
+ + on_each_cpu_mask(&cpu_pmu->valid_cpus, cpu_pmu->reset, cpu_pmu, 1);
+++=======
++ if (pmu->reset)
++ on_each_cpu_mask(&pmu->valid_cpus, pmu->reset, pmu, 1);
+++>>>>>>> tracking-lsk-vexpress-iks
+ }
+
+ /*
+@@@ -168,38 -214,44 +272,74 @@@
+ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+ {
+++<<<<<<< HEAD
+ + struct arm_pmu *pmu = per_cpu(cpu_pmu, (long)hcpu);
+++=======
++ struct arm_pmu *pmu;
++ struct arm_cpu_pmu *cpu_pmu;
++ int ret = NOTIFY_DONE;
+++>>>>>>> tracking-lsk-vexpress-iks
+
+ if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+ return NOTIFY_DONE;
+
+++<<<<<<< HEAD
+ + if (pmu && pmu->reset)
+ + pmu->reset(pmu);
+ + else
+ + return NOTIFY_DONE;
+++=======
++ cpu_for_each_pmu(pmu, cpu_pmu, (unsigned int)hcpu)
++ if (pmu->reset) {
++ pmu->reset(pmu);
++ ret = NOTIFY_OK;
++ }
+++>>>>>>> tracking-lsk-vexpress-iks
+
+- return NOTIFY_OK;
++ return ret;
++ }
++
++ static int cpu_pmu_pm_notify(struct notifier_block *b,
++ unsigned long action, void *hcpu)
++ {
++ int cpu = smp_processor_id();
++ struct arm_pmu *pmu;
++ struct arm_cpu_pmu *cpu_pmu;
++ int ret = NOTIFY_DONE;
++
++ cpu_for_each_pmu(pmu, cpu_pmu, cpu) {
++ struct cpupmu_regs *pmuregs = &cpu_pmu->cpu_pmu_regs;
++
++ if (action == CPU_PM_ENTER && pmu->save_regs)
++ pmu->save_regs(pmu, pmuregs);
++ else if (action == CPU_PM_EXIT && pmu->restore_regs)
++ pmu->restore_regs(pmu, pmuregs);
++
++ ret = NOTIFY_OK;
++ }
++
++ return ret;
+ }
+
+ +static int cpu_pmu_pm_notify(struct notifier_block *b,
+ + unsigned long action, void *hcpu)
+ +{
+ + int cpu = smp_processor_id();
+ + struct arm_pmu *pmu = per_cpu(cpu_pmu, cpu);
+ + struct cpupmu_regs *pmuregs = &per_cpu(cpu_pmu_regs, cpu);
+ +
+ + if (!pmu)
+ + return NOTIFY_DONE;
+ +
+ + if (action == CPU_PM_ENTER && pmu->save_regs) {
+ + pmu->save_regs(pmu, pmuregs);
+ + } else if (action == CPU_PM_EXIT && pmu->restore_regs) {
+ + pmu->restore_regs(pmu, pmuregs);
+ + }
+ +
+ + return NOTIFY_OK;
+ +}
+ +
+ static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
+ .notifier_call = cpu_pmu_notify,
+ };
+@@@ -291,20 -414,24 +502,32 @@@ static int cpu_pmu_device_probe(struct
+ const struct of_device_id *of_id;
+ struct device_node *node = pdev->dev.of_node;
+ struct arm_pmu *pmu;
+++<<<<<<< HEAD
+ + int ret = 0;
+ + int cpu;
+++=======
++ struct arm_cpu_pmu __percpu *cpu_pmus;
++ int ret = 0;
+++>>>>>>> tracking-lsk-vexpress-iks
+
+ pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
+- if (!pmu) {
+- pr_info("failed to allocate PMU device!");
+- return -ENOMEM;
+- }
++ if (!pmu)
++ goto error_nomem;
++
++ pmu->cpu_pmus = cpu_pmus = alloc_percpu(struct arm_cpu_pmu);
++ if (!cpu_pmus)
++ goto error_nomem;
+
+ if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
+ smp_call_func_t init_fn = (smp_call_func_t)of_id->data;
+ struct device_node *ncluster;
+ int cluster = -1;
+ cpumask_t sibling_mask;
+++<<<<<<< HEAD
+++=======
++ cpumask_t phys_sibling_mask;
++ unsigned int i;
+++>>>>>>> tracking-lsk-vexpress-iks
+
+ ncluster = of_parse_phandle(node, "cluster", 0);
+ if (ncluster) {
+@@@ -315,11 -442,59 +538,67 @@@
+ cluster = be32_to_cpup(hwid);
+ }
+ /* set sibling mask to all cpu mask if socket is not specified */
+++<<<<<<< HEAD
+ + if (cluster == -1 ||
+ + cluster_to_logical_mask(cluster, &sibling_mask))
+ + cpumask_setall(&sibling_mask);
+ +
+ + smp_call_function_any(&sibling_mask, init_fn, pmu, 1);
+++=======
++ /*
++ * In a switcher kernel, we affine all PMUs to CPUs and
++ * abstract the runtime presence/absence of PMUs at a lower
++ * level.
++ */
++ if (cluster == -1 || IS_ENABLED(CONFIG_BL_SWITCHER) ||
++ cluster_to_logical_mask(cluster, &sibling_mask))
++ cpumask_copy(&sibling_mask, cpu_possible_mask);
++
++ if (bL_switcher_get_enabled())
++ /*
++ * The switcher initialises late now, so it should not
++ * have initialised yet:
++ */
++ BUG();
++
++ cpumask_copy(&phys_sibling_mask, cpu_possible_mask);
++
++ /*
++ * HACK: Deduce how the switcher will modify the topology
++ * in order to fill in PMU<->CPU combinations which don't
++ * make sense when the switcher is disabled. Ideally, this
++ * knowledge should come from the swithcer somehow.
++ */
++ for_each_possible_cpu(i) {
++ int cpu = i;
++
++ per_cpu_ptr(cpu_pmus, i)->mpidr = -1;
++ per_cpu_ptr(cpu_pmus, i)->irq = -1;
++
++ if (cpu_topology[i].socket_id != cluster) {
++ cpumask_clear_cpu(i, &phys_sibling_mask);
++ cpu = bL_get_partner(i, cluster);
++ }
++
++ if (cpu == -1)
++ cpumask_clear_cpu(i, &sibling_mask);
++ else {
++ int irq = find_irq(pdev, node, ncluster,
++ cpu_logical_map(cpu));
++ per_cpu_ptr(cpu_pmus, i)->mpidr =
++ cpu_logical_map(cpu);
++ per_cpu_ptr(cpu_pmus, i)->irq = irq;
++ }
++ }
++
++ /*
++ * This relies on an MP view of the system to choose the right
++ * CPU to run init_fn:
++ */
++ smp_call_function_any(&phys_sibling_mask, init_fn, pmu, 1);
++
++ bL_switcher_put_enabled();
+++>>>>>>> tracking-lsk-vexpress-iks
+
+ /* now set the valid_cpus after init */
+ cpumask_copy(&pmu->valid_cpus, &sibling_mask);
+@@@ -327,14 -502,9 +606,15 @@@
+ ret = probe_current_pmu(pmu);
+ }
+
+- if (ret) {
+- pr_info("failed to probe PMU!");
+- goto out_free;
+- }
++ if (ret)
++ goto error;
+
+++<<<<<<< HEAD
+ + for_each_cpu_mask(cpu, pmu->valid_cpus)
+ + per_cpu(cpu_pmu, cpu) = pmu;
+ +
+++=======
+++>>>>>>> tracking-lsk-vexpress-iks
+ pmu->plat_device = pdev;
+ cpu_pmu_init(pmu);
+ ret = armpmu_register(pmu, -1);
diff --git a/solutions/f39f86eaa6e8771669e537972f36862a/conflict3 b/solutions/f39f86eaa6e8771669e537972f36862a/conflict3
new file mode 100644
index 0000000..3175527
--- /dev/null
+++ b/solutions/f39f86eaa6e8771669e537972f36862a/conflict3
@@ -0,0 +1,179 @@
+diff --cc arch/arm/kernel/perf_event_v7.c
+index 654db50,7e38f1f..0000000
+--- a/arch/arm/kernel/perf_event_v7.c
++++ b/arch/arm/kernel/perf_event_v7.c
+@@@ -916,85 -1085,94 +1085,139 @@@ static void armv7_pmnc_dump_regs(struc
+ {
+ u32 val;
+ unsigned int cnt;
++ struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
+
+ printk(KERN_INFO "PMNC registers dump:\n");
++ printk(KERN_INFO "PMNC =0x%08x\n", __v7_pmu_read_reg(PMCR));
++ printk(KERN_INFO "CNTENS=0x%08x\n", __v7_pmu_read_reg(PMCNTENSET));
++ printk(KERN_INFO "INTENS=0x%08x\n", __v7_pmu_read_reg(PMINTENSET));
++ printk(KERN_INFO "FLAGS =0x%08x\n", __v7_pmu_read_reg(PMOVSR));
++ printk(KERN_INFO "SELECT=0x%08x\n", __v7_pmu_read_reg(PMSELR));
++ printk(KERN_INFO "CCNT =0x%08x\n", __v7_pmu_read_reg(PMCCNTR));
+
+- asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
+- printk(KERN_INFO "PMNC =0x%08x\n", val);
++ for (cnt = ARMV7_IDX_COUNTER0;
++ cnt <= ARMV7_IDX_COUNTER_LAST(pmu); cnt++) {
++ armv7_pmnc_select_counter(cpupmu, cnt);
++ printk(KERN_INFO "CNT[%d] count =0x%08x\n",
++ ARMV7_IDX_TO_COUNTER(cnt),
++ __v7_pmu_read_reg(cpupmu, PMXEVCNTR));
++ printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
++ ARMV7_IDX_TO_COUNTER(cnt),
++ __v7_pmu_read_reg(cpupmu, PMXEVTYPER));
++ }
++ }
++ #endif
+
+- asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
+- printk(KERN_INFO "CNTENS=0x%08x\n", val);
++ static void armv7pmu_save_regs(struct arm_pmu *pmu,
++ struct cpupmu_regs *regs)
++ {
++ unsigned int cnt;
++ struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
+
+- asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
+- printk(KERN_INFO "INTENS=0x%08x\n", val);
++ if (!cpupmu->active)
++ return;
+
+- asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
+- printk(KERN_INFO "FLAGS =0x%08x\n", val);
++ if (!*cpupmu->cpu_hw_events.used_mask)
++ return;
+
+- asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
+- printk(KERN_INFO "SELECT=0x%08x\n", val);
++ if (!__v7_pmu_save_reg(cpupmu, PMCR) & ARMV7_PMNC_E)
++ return;
+
+- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
+- printk(KERN_INFO "CCNT =0x%08x\n", val);
++ __v7_pmu_save_reg(cpupmu, PMCNTENSET);
++ __v7_pmu_save_reg(cpupmu, PMUSERENR);
++ __v7_pmu_save_reg(cpupmu, PMINTENSET);
++ __v7_pmu_save_reg(cpupmu, PMCCNTR);
+
+ for (cnt = ARMV7_IDX_COUNTER0;
+- cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+- armv7_pmnc_select_counter(cnt);
+- asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
+- printk(KERN_INFO "CNT[%d] count =0x%08x\n",
+- ARMV7_IDX_TO_COUNTER(cnt), val);
+- asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
+- printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
+- ARMV7_IDX_TO_COUNTER(cnt), val);
++ cnt <= ARMV7_IDX_COUNTER_LAST(pmu); cnt++) {
++ armv7_pmnc_select_counter(cpupmu, cnt);
++ __v7_pmu_save_reg(cpupmu, PMSELR); /* mirror physical PMSELR */
++ __v7_pmu_save_reg(cpupmu, PMXEVTYPER);
++ __v7_pmu_save_reg(cpupmu, PMXEVCNTR);
+ }
++ return;
++ }
++
++ /* armv7pmu_reset() must be called before calling this funtion */
++ static void armv7pmu_restore_regs(struct arm_pmu *pmu,
++ struct cpupmu_regs *regs)
++ {
++ unsigned int cnt;
++ u32 pmcr;
++ struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
++
++ armv7pmu_reset(pmu);
++
++ if (!cpupmu->active)
++ return;
++
++ if (!*cpupmu->cpu_hw_events.used_mask)
++ return;
++
++ pmcr = __v7_pmu_read_logical(cpupmu, PMCR);
++ if (!pmcr & ARMV7_PMNC_E)
++ return;
++
++ __v7_pmu_restore_reg(cpupmu, PMCNTENSET);
++ __v7_pmu_restore_reg(cpupmu, PMUSERENR);
++ __v7_pmu_restore_reg(cpupmu, PMINTENSET);
++ __v7_pmu_restore_reg(cpupmu, PMCCNTR);
++
++ for (cnt = ARMV7_IDX_COUNTER0;
++ cnt <= ARMV7_IDX_COUNTER_LAST(pmu); cnt++) {
++ armv7_pmnc_select_counter(cpupmu, cnt);
++ __v7_pmu_save_reg(cpupmu, PMSELR); /* mirror physical PMSELR */
++ __v7_pmu_restore_reg(cpupmu, PMXEVTYPER);
++ __v7_pmu_restore_reg(cpupmu, PMXEVCNTR);
++ }
++ __v7_pmu_write_reg(cpupmu, PMCR, pmcr);
+ }
+- #endif
+
+ +static void armv7pmu_save_regs(struct arm_pmu *cpu_pmu,
+ + struct cpupmu_regs *regs)
+ +{
+ + unsigned int cnt;
+ + asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (regs->pmc));
+ + if (!(regs->pmc & ARMV7_PMNC_E))
+ + return;
+ +
+ + asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (regs->pmcntenset));
+ + asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (regs->pmuseren));
+ + asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (regs->pmintenset));
+ + asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (regs->pmxevtcnt[0]));
+ + for (cnt = ARMV7_IDX_COUNTER0;
+ + cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+ + armv7_pmnc_select_counter(cnt);
+ + asm volatile("mrc p15, 0, %0, c9, c13, 1"
+ + : "=r"(regs->pmxevttype[cnt]));
+ + asm volatile("mrc p15, 0, %0, c9, c13, 2"
+ + : "=r"(regs->pmxevtcnt[cnt]));
+ + }
+ + return;
+ +}
+ +
+ +static void armv7pmu_restore_regs(struct arm_pmu *cpu_pmu,
+ + struct cpupmu_regs *regs)
+ +{
+ + unsigned int cnt;
+ + if (!(regs->pmc & ARMV7_PMNC_E))
+ + return;
+ +
+ + asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (regs->pmcntenset));
+ + asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (regs->pmuseren));
+ + asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (regs->pmintenset));
+ + asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (regs->pmxevtcnt[0]));
+ + for (cnt = ARMV7_IDX_COUNTER0;
+ + cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+ + armv7_pmnc_select_counter(cnt);
+ + asm volatile("mcr p15, 0, %0, c9, c13, 1"
+ + : : "r"(regs->pmxevttype[cnt]));
+ + asm volatile("mcr p15, 0, %0, c9, c13, 2"
+ + : : "r"(regs->pmxevtcnt[cnt]));
+ + }
+ + asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (regs->pmc));
+ +}
+ +
+ static void armv7pmu_enable_event(struct perf_event *event)
+ {
+ unsigned long flags;
+@@@ -1270,7 -1479,10 +1524,13 @@@ static void armv7pmu_init(struct arm_pm
+ cpu_pmu->reset = armv7pmu_reset;
+ cpu_pmu->save_regs = armv7pmu_save_regs;
+ cpu_pmu->restore_regs = armv7pmu_restore_regs;
+++<<<<<<< HEAD
+++=======
++ cpu_pmu->cpu_init = armv7pmu_cpu_init;
+++>>>>>>> tracking-lsk-vexpress-iks
+ cpu_pmu->max_period = (1LLU << 32) - 1;
++
++ cpu_pmu->cpu_pmus = cpu_pmus;
+ };
+
+ static u32 armv7_read_num_pmnc_events(void)
diff --git a/solutions/f39f86eaa6e8771669e537972f36862a/solution1 b/solutions/f39f86eaa6e8771669e537972f36862a/solution1
new file mode 100644
index 0000000..57c616a
--- /dev/null
+++ b/solutions/f39f86eaa6e8771669e537972f36862a/solution1
@@ -0,0 +1,30 @@
+--- /home/ynk/git.linaro.org/linux-linaro-stable.manifest/solutions/f39f86eaa6e8771669e537972f36862a/arch-arm-include-asm-pmu.h.orig 2013-05-25 19:14:41.641385887 +0400
++++ arch/arm/include/asm/pmu.h 2013-05-25 19:17:27.845387261 +0400
+@@ -73,8 +73,6 @@ struct cpupmu_regs {
+ u32 pmxevtcnt[8];
+ };
+
+-<<<<<<< HEAD
+-=======
+ struct arm_cpu_pmu {
+ bool valid;
+ bool active;
+@@ -90,7 +88,6 @@ struct arm_cpu_pmu {
+ void *logical_state;
+ };
+
+->>>>>>> tracking-lsk-vexpress-iks
+ struct arm_pmu {
+ struct pmu pmu;
+ cpumask_t active_irqs;
+@@ -113,10 +110,7 @@ struct arm_pmu {
+ int (*map_event)(struct perf_event *event);
+ void (*save_regs)(struct arm_pmu *, struct cpupmu_regs *);
+ void (*restore_regs)(struct arm_pmu *, struct cpupmu_regs *);
+-<<<<<<< HEAD
+-=======
+ void (*cpu_init)(struct arm_pmu *, struct arm_cpu_pmu *);
+->>>>>>> tracking-lsk-vexpress-iks
+ int num_events;
+ atomic_t active_events;
+ struct mutex reserve_mutex;
diff --git a/solutions/f39f86eaa6e8771669e537972f36862a/solution2 b/solutions/f39f86eaa6e8771669e537972f36862a/solution2
new file mode 100644
index 0000000..fab306b
--- /dev/null
+++ b/solutions/f39f86eaa6e8771669e537972f36862a/solution2
@@ -0,0 +1,285 @@
+--- /home/ynk/git.linaro.org/linux-linaro-stable.manifest/solutions/f39f86eaa6e8771669e537972f36862a/arch-arm-kernel-perf_event_cpu.c.orig 2013-05-25 19:17:27.893387263 +0400
++++ arch/arm/kernel/perf_event_cpu.c 2013-05-25 19:31:52.325394416 +0400
+@@ -19,10 +19,7 @@
+ #define pr_fmt(fmt) "CPU PMU: " fmt
+
+ #include <linux/bitmap.h>
+-<<<<<<< HEAD
+-=======
+ #include <linux/cpumask.h>
+->>>>>>> tracking-lsk-vexpress-iks
+ #include <linux/cpu_pm.h>
+ #include <linux/export.h>
+ #include <linux/kernel.h>
+@@ -40,12 +37,7 @@
+ #include <asm/smp_plat.h>
+ #include <asm/topology.h>
+
+-<<<<<<< HEAD
+-/* Set at runtime when we know what CPU type we are. */
+-static DEFINE_PER_CPU(struct arm_pmu *, cpu_pmu);
+-=======
+ static LIST_HEAD(cpu_pmus_list);
+->>>>>>> tracking-lsk-vexpress-iks
+
+ #define cpu_for_each_pmu(pmu, cpu_pmu, cpu) \
+ for_each_pmu(pmu, &cpu_pmus_list) \
+@@ -62,19 +54,13 @@ static struct arm_pmu *__cpu_find_any_pm
+ return NULL;
+ }
+
+-static DEFINE_PER_CPU(struct cpupmu_regs, cpu_pmu_regs);
+-
+ /*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
+ const char *perf_pmu_name(void)
+ {
+-<<<<<<< HEAD
+- struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
+-=======
+ struct arm_pmu *pmu = __cpu_find_any_pmu(0);
+->>>>>>> tracking-lsk-vexpress-iks
+ if (!pmu)
+ return NULL;
+
+@@ -84,11 +70,7 @@ EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+ int perf_num_counters(void)
+ {
+-<<<<<<< HEAD
+- struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
+-=======
+ struct arm_pmu *pmu = __cpu_find_any_pmu(0);
+->>>>>>> tracking-lsk-vexpress-iks
+
+ if (!pmu)
+ return 0;
+@@ -109,22 +91,11 @@ static struct pmu_hw_events *cpu_pmu_get
+
+ static int find_logical_cpu(u32 mpidr)
+ {
+-<<<<<<< HEAD
+- int i, irq, irqs;
+- struct platform_device *pmu_device = cpu_pmu->plat_device;
+- int cpu = -1;
+-=======
+ int cpu = bL_switcher_get_logical_index(mpidr);
+->>>>>>> tracking-lsk-vexpress-iks
+
+ if (cpu != -EUNATCH)
+ return cpu;
+
+-<<<<<<< HEAD
+- for (i = 0; i < irqs; ++i) {
+- cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
+- if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
+-=======
+ return get_logical_index(mpidr);
+ }
+
+@@ -136,7 +107,6 @@ static void cpu_pmu_free_irq(struct arm_
+
+ for_each_possible_cpu(i) {
+ if (!(cpu_pmu = per_cpu_ptr(pmu->cpu_pmus, i)))
+->>>>>>> tracking-lsk-vexpress-iks
+ continue;
+
+ if (cpu_pmu->mpidr == -1)
+@@ -156,13 +126,8 @@ static void cpu_pmu_free_irq(struct arm_
+ static int cpu_pmu_request_irq(struct arm_pmu *pmu, irq_handler_t handler)
+ {
+ int i, err, irq, irqs;
+-<<<<<<< HEAD
+- struct platform_device *pmu_device = cpu_pmu->plat_device;
+- int cpu = -1;
+-=======
+ int cpu;
+ struct arm_cpu_pmu *cpu_pmu;
+->>>>>>> tracking-lsk-vexpress-iks
+
+ irqs = 0;
+ for_each_possible_cpu(i)
+@@ -174,18 +139,11 @@ static int cpu_pmu_request_irq(struct ar
+ return -ENODEV;
+ }
+
+-<<<<<<< HEAD
+- for (i = 0; i < irqs; ++i) {
+- err = 0;
+- cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
+- irq = platform_get_irq(pmu_device, i);
+-=======
+ for_each_possible_cpu(i) {
+ if (!(cpu_pmu = per_cpu_ptr(pmu->cpu_pmus, i)))
+ continue;
+
+ irq = cpu_pmu->irq;
+->>>>>>> tracking-lsk-vexpress-iks
+ if (irq < 0)
+ continue;
+
+@@ -215,11 +173,7 @@ static int cpu_pmu_request_irq(struct ar
+ return err;
+ }
+
+-<<<<<<< HEAD
+- cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
+-=======
+ cpumask_set_cpu(cpu, &pmu->active_irqs);
+->>>>>>> tracking-lsk-vexpress-iks
+ }
+
+ return 0;
+@@ -228,19 +182,12 @@ static int cpu_pmu_request_irq(struct ar
+ static void cpu_pmu_init(struct arm_pmu *pmu)
+ {
+ int cpu;
+-<<<<<<< HEAD
+- for_each_cpu_mask(cpu, cpu_pmu->valid_cpus) {
+- struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+- events->events = per_cpu(hw_events, cpu);
+- events->used_mask = per_cpu(used_mask, cpu);
+-=======
+ for_each_cpu_mask(cpu, pmu->valid_cpus) {
+ struct arm_cpu_pmu *cpu_pmu = per_cpu_ptr(pmu->cpu_pmus, cpu);
+ struct pmu_hw_events *events = &cpu_pmu->cpu_hw_events;
+
+ events->events = cpu_pmu->hw_events;
+ events->used_mask = cpu_pmu->used_mask;
+->>>>>>> tracking-lsk-vexpress-iks
+ raw_spin_lock_init(&events->pmu_lock);
+
+ if (pmu->cpu_init)
+@@ -254,13 +201,8 @@ static void cpu_pmu_init(struct arm_pmu
+ pmu->free_irq = cpu_pmu_free_irq;
+
+ /* Ensure the PMU has sane values out of reset. */
+-<<<<<<< HEAD
+- if (cpu_pmu->reset)
+- on_each_cpu_mask(&cpu_pmu->valid_cpus, cpu_pmu->reset, cpu_pmu, 1);
+-=======
+ if (pmu->reset)
+ on_each_cpu_mask(&pmu->valid_cpus, pmu->reset, pmu, 1);
+->>>>>>> tracking-lsk-vexpress-iks
+ }
+
+ /*
+@@ -272,29 +214,18 @@ static void cpu_pmu_init(struct arm_pmu
+ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+ {
+-<<<<<<< HEAD
+- struct arm_pmu *pmu = per_cpu(cpu_pmu, (long)hcpu);
+-=======
+ struct arm_pmu *pmu;
+ struct arm_cpu_pmu *cpu_pmu;
+ int ret = NOTIFY_DONE;
+->>>>>>> tracking-lsk-vexpress-iks
+
+ if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+ return NOTIFY_DONE;
+
+-<<<<<<< HEAD
+- if (pmu && pmu->reset)
+- pmu->reset(pmu);
+- else
+- return NOTIFY_DONE;
+-=======
+ cpu_for_each_pmu(pmu, cpu_pmu, (unsigned int)hcpu)
+ if (pmu->reset) {
+ pmu->reset(pmu);
+ ret = NOTIFY_OK;
+ }
+->>>>>>> tracking-lsk-vexpress-iks
+
+ return ret;
+ }
+@@ -321,25 +252,6 @@ static int cpu_pmu_pm_notify(struct noti
+ return ret;
+ }
+
+-static int cpu_pmu_pm_notify(struct notifier_block *b,
+- unsigned long action, void *hcpu)
+-{
+- int cpu = smp_processor_id();
+- struct arm_pmu *pmu = per_cpu(cpu_pmu, cpu);
+- struct cpupmu_regs *pmuregs = &per_cpu(cpu_pmu_regs, cpu);
+-
+- if (!pmu)
+- return NOTIFY_DONE;
+-
+- if (action == CPU_PM_ENTER && pmu->save_regs) {
+- pmu->save_regs(pmu, pmuregs);
+- } else if (action == CPU_PM_EXIT && pmu->restore_regs) {
+- pmu->restore_regs(pmu, pmuregs);
+- }
+-
+- return NOTIFY_OK;
+-}
+-
+ static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
+ .notifier_call = cpu_pmu_notify,
+ };
+@@ -502,13 +414,8 @@ static int cpu_pmu_device_probe(struct p
+ const struct of_device_id *of_id;
+ struct device_node *node = pdev->dev.of_node;
+ struct arm_pmu *pmu;
+-<<<<<<< HEAD
+- int ret = 0;
+- int cpu;
+-=======
+ struct arm_cpu_pmu __percpu *cpu_pmus;
+ int ret = 0;
+->>>>>>> tracking-lsk-vexpress-iks
+
+ pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
+ if (!pmu)
+@@ -523,11 +430,8 @@ static int cpu_pmu_device_probe(struct p
+ struct device_node *ncluster;
+ int cluster = -1;
+ cpumask_t sibling_mask;
+-<<<<<<< HEAD
+-=======
+ cpumask_t phys_sibling_mask;
+ unsigned int i;
+->>>>>>> tracking-lsk-vexpress-iks
+
+ ncluster = of_parse_phandle(node, "cluster", 0);
+ if (ncluster) {
+@@ -538,13 +442,6 @@ static int cpu_pmu_device_probe(struct p
+ cluster = be32_to_cpup(hwid);
+ }
+ /* set sibling mask to all cpu mask if socket is not specified */
+-<<<<<<< HEAD
+- if (cluster == -1 ||
+- cluster_to_logical_mask(cluster, &sibling_mask))
+- cpumask_setall(&sibling_mask);
+-
+- smp_call_function_any(&sibling_mask, init_fn, pmu, 1);
+-=======
+ /*
+ * In a switcher kernel, we affine all PMUs to CPUs and
+ * abstract the runtime presence/absence of PMUs at a lower
+@@ -598,7 +495,6 @@ static int cpu_pmu_device_probe(struct p
+ smp_call_function_any(&phys_sibling_mask, init_fn, pmu, 1);
+
+ bL_switcher_put_enabled();
+->>>>>>> tracking-lsk-vexpress-iks
+
+ /* now set the valid_cpus after init */
+ cpumask_copy(&pmu->valid_cpus, &sibling_mask);
+@@ -609,12 +505,6 @@ static int cpu_pmu_device_probe(struct p
+ if (ret)
+ goto error;
+
+-<<<<<<< HEAD
+- for_each_cpu_mask(cpu, pmu->valid_cpus)
+- per_cpu(cpu_pmu, cpu) = pmu;
+-
+-=======
+->>>>>>> tracking-lsk-vexpress-iks
+ pmu->plat_device = pdev;
+ cpu_pmu_init(pmu);
+ ret = armpmu_register(pmu, -1);
diff --git a/solutions/f39f86eaa6e8771669e537972f36862a/solution3 b/solutions/f39f86eaa6e8771669e537972f36862a/solution3
new file mode 100644
index 0000000..c46e438
--- /dev/null
+++ b/solutions/f39f86eaa6e8771669e537972f36862a/solution3
@@ -0,0 +1,65 @@
+--- /home/ynk/git.linaro.org/linux-linaro-stable.manifest/solutions/f39f86eaa6e8771669e537972f36862a/arch-arm-kernel-perf_event_v7.c.orig 2013-05-25 19:31:52.389394417 +0400
++++ arch/arm/kernel/perf_event_v7.c 2013-05-25 19:42:09.293399522 +0400
+@@ -1173,51 +1173,6 @@ static void armv7pmu_restore_regs(struct
+ __v7_pmu_write_reg(cpupmu, PMCR, pmcr);
+ }
+
+-static void armv7pmu_save_regs(struct arm_pmu *cpu_pmu,
+- struct cpupmu_regs *regs)
+-{
+- unsigned int cnt;
+- asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (regs->pmc));
+- if (!(regs->pmc & ARMV7_PMNC_E))
+- return;
+-
+- asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (regs->pmcntenset));
+- asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (regs->pmuseren));
+- asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (regs->pmintenset));
+- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (regs->pmxevtcnt[0]));
+- for (cnt = ARMV7_IDX_COUNTER0;
+- cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+- armv7_pmnc_select_counter(cnt);
+- asm volatile("mrc p15, 0, %0, c9, c13, 1"
+- : "=r"(regs->pmxevttype[cnt]));
+- asm volatile("mrc p15, 0, %0, c9, c13, 2"
+- : "=r"(regs->pmxevtcnt[cnt]));
+- }
+- return;
+-}
+-
+-static void armv7pmu_restore_regs(struct arm_pmu *cpu_pmu,
+- struct cpupmu_regs *regs)
+-{
+- unsigned int cnt;
+- if (!(regs->pmc & ARMV7_PMNC_E))
+- return;
+-
+- asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (regs->pmcntenset));
+- asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (regs->pmuseren));
+- asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (regs->pmintenset));
+- asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (regs->pmxevtcnt[0]));
+- for (cnt = ARMV7_IDX_COUNTER0;
+- cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+- armv7_pmnc_select_counter(cnt);
+- asm volatile("mcr p15, 0, %0, c9, c13, 1"
+- : : "r"(regs->pmxevttype[cnt]));
+- asm volatile("mcr p15, 0, %0, c9, c13, 2"
+- : : "r"(regs->pmxevtcnt[cnt]));
+- }
+- asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (regs->pmc));
+-}
+-
+ static void armv7pmu_enable_event(struct perf_event *event)
+ {
+ unsigned long flags;
+@@ -1524,10 +1479,7 @@ static void armv7pmu_init(struct arm_pmu
+ cpu_pmu->reset = armv7pmu_reset;
+ cpu_pmu->save_regs = armv7pmu_save_regs;
+ cpu_pmu->restore_regs = armv7pmu_restore_regs;
+-<<<<<<< HEAD
+-=======
+ cpu_pmu->cpu_init = armv7pmu_cpu_init;
+->>>>>>> tracking-lsk-vexpress-iks
+ cpu_pmu->max_period = (1LLU << 32) - 1;
+
+ cpu_pmu->cpu_pmus = cpu_pmus;
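
Each conflictN file above records the unresolved three-way merge (git "diff --cc" output with <<<<<<< markers), and the matching solutionN file is a plain unified diff that removes the markers and keeps the resolved code. The sketch below is hypothetical: it shows one way the recorded resolutions could be replayed with patch(1) onto a tree where the same conflicts have been reproduced. Only the directory name comes from this commit; the replay step itself is an assumption, not a documented part of the workflow.

# Hypothetical sketch: re-apply the recorded solution diffs on top of a
# kernel tree in which the corresponding merge conflicts have been
# reproduced.  Assumes GNU patch is on PATH and the current directory is
# the tree root; only the paths below come from this commit.
import glob
import subprocess

def apply_solutions(solution_dir="solutions/f39f86eaa6e8771669e537972f36862a"):
    for path in sorted(glob.glob(solution_dir + "/solution*")):
        # Each solution file is a unified diff against a single source file
        # (e.g. arch/arm/include/asm/pmu.h for solution1).
        subprocess.run(["patch", "-p0", "--forward", "-i", path], check=True)

if __name__ == "__main__":
    apply_solutions()
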