Diffstat (limited to 'arch/x86/events')
-rw-r--r--  arch/x86/events/Kconfig |  8 ++++----
-rw-r--r--  arch/x86/events/core.c  | 15 +++++----------
2 files changed, 9 insertions(+), 14 deletions(-)
diff --git a/arch/x86/events/Kconfig b/arch/x86/events/Kconfig
index 4a809c6cbd2f..39d9ded9e25a 100644
--- a/arch/x86/events/Kconfig
+++ b/arch/x86/events/Kconfig
@@ -5,7 +5,7 @@ config PERF_EVENTS_INTEL_UNCORE
 	tristate "Intel uncore performance events"
 	depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
 	default y
-	---help---
+	help
 	  Include support for Intel uncore performance events. These are
 	  available on NehalemEX and more modern processors.
 
@@ -13,7 +13,7 @@ config PERF_EVENTS_INTEL_RAPL
 	tristate "Intel/AMD rapl performance events"
 	depends on PERF_EVENTS && (CPU_SUP_INTEL || CPU_SUP_AMD) && PCI
 	default y
-	---help---
+	help
 	  Include support for Intel and AMD rapl performance events for power
 	  monitoring on modern processors.
 
@@ -21,14 +21,14 @@ config PERF_EVENTS_INTEL_CSTATE
 	tristate "Intel cstate performance events"
 	depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
 	default y
-	---help---
+	help
 	  Include support for Intel cstate performance events for power
 	  monitoring on modern processors.
 
 config PERF_EVENTS_AMD_POWER
 	depends on PERF_EVENTS && CPU_SUP_AMD
 	tristate "AMD Processor Power Reporting Mechanism"
-	---help---
+	help
 	  Provide power reporting mechanism support for AMD processors.
 	  Currently, it leverages X86_FEATURE_ACC_POWER
 	  (CPUID Fn8000_0007_EDX[12]) interface to calculate the
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 9e63ee50b19a..4103665c6e03 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2166,11 +2166,6 @@ static int x86_pmu_event_init(struct perf_event *event)
 	return err;
 }
 
-static void refresh_pce(void *ignored)
-{
-	load_mm_cr4_irqsoff(this_cpu_read(cpu_tlbstate.loaded_mm));
-}
-
 static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 {
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
@@ -2183,13 +2178,13 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 	 * userspace with CR4.PCE clear while another task is still
 	 * doing on_each_cpu_mask() to propagate CR4.PCE.
 	 *
-	 * For now, this can't happen because all callers hold mmap_sem
+	 * For now, this can't happen because all callers hold mmap_lock
 	 * for write. If this changes, we'll need a different solution.
 	 */
-	lockdep_assert_held_write(&mm->mmap_sem);
+	mmap_assert_write_locked(mm);
 
 	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
-		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+		on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
 }
 
 static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
@@ -2199,7 +2194,7 @@ static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
 		return;
 
 	if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
-		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+		on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
 }
 
 static int x86_pmu_event_idx(struct perf_event *event)
@@ -2257,7 +2252,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 		else if (x86_pmu.attr_rdpmc == 2)
 			static_branch_dec(&rdpmc_always_available_key);
 
-		on_each_cpu(refresh_pce, NULL, 1);
+		on_each_cpu(cr4_update_pce, NULL, 1);
 		x86_pmu.attr_rdpmc = val;
 	}
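
Note on the replacement helper: the removed refresh_pce() was a thin wrapper that reloaded CR4.PCE from the current mm via load_mm_cr4_irqsoff(); the diff points all three callers at cr4_update_pce() instead, keeping the CR4 manipulation with the rest of the CR4 code rather than in the perf core. Below is a minimal sketch of the behaviour perf relies on. It is an illustration, not the in-tree implementation: the name cr4_update_pce_sketch() is made up, and it ignores the rdpmc_always_available_key / rdpmc_never_available_key static keys that the real helper also has to honour (see set_attr_rdpmc() above).

#include <linux/mm_types.h>		/* struct mm_struct, mm->context */
#include <linux/atomic.h>		/* atomic_read() */
#include <asm/tlbflush.h>		/* cpu_tlbstate, cr4_{set,clear}_bits_irqsoff() */
#include <asm/processor-flags.h>	/* X86_CR4_PCE */

/*
 * Hedged sketch: recompute CR4.PCE on the local CPU from the currently
 * loaded mm, so user-space RDPMC only works while the mm has at least
 * one RDPMC-enabled perf mapping (perf_rdpmc_allowed > 0).
 */
static void cr4_update_pce_sketch(void *ignored)
{
	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	if (mm && atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits_irqsoff(X86_CR4_PCE);
	else
		cr4_clear_bits_irqsoff(X86_CR4_PCE);
}

The call sites above invoke the helper through on_each_cpu_mask()/on_each_cpu(), i.e. in IPI context with interrupts disabled, which is why the _irqsoff variants of the CR4 accessors are the appropriate choice, just as in the removed refresh_pce().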