From ab573844e3058eef2788803d373019f8bebead57 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 1 May 2013 17:25:44 +0200 Subject: perf: Fix hw breakpoints overflow period sampling The hw breakpoint pmu 'add' function is missing the period_left update needed for SW events. The perf HW breakpoint events use the SW events framework to process the overflow, so it needs to be properly initialized in the PMU 'add' method. Signed-off-by: Jiri Olsa Reviewed-by: Peter Zijlstra Cc: H. Peter Anvin Cc: Oleg Nesterov Cc: Arnaldo Carvalho de Melo Cc: Ingo Molnar Cc: Paul Mackerras Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Vince Weaver Cc: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1367421944-19082-5-git-send-email-jolsa@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 2 +- kernel/events/hw_breakpoint.c | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 9dc297faf7c0..e0dcced282e4 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4961,7 +4961,7 @@ static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); * sign as trigger. */ -static u64 perf_swevent_set_period(struct perf_event *event) +u64 perf_swevent_set_period(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; u64 period = hwc->last_period; diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index a64f8aeb5c1f..966a241e8616 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -612,6 +612,11 @@ static int hw_breakpoint_add(struct perf_event *bp, int flags) if (!(flags & PERF_EF_START)) bp->hw.state = PERF_HES_STOPPED; + if (is_sampling_event(bp)) { + bp->hw.last_period = bp->hw.sample_period; + perf_swevent_set_period(bp); + } + return arch_install_hw_breakpoint(bp); } -- cgit v1.2.3 From 9e6302056f8029f438e853432a856b9f13de26a6 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Wed, 3 Apr 2013 14:21:33 +0200 Subject: perf: Use hrtimers for event multiplexing The current scheme of using the timer tick was fine for per-thread events. However, it was causing bias issues in system-wide mode (including for uncore PMUs). Event groups would not get their fair share of runtime on the PMU. With tickless kernels, if a core is idle there is no timer tick, and thus no event rotation (multiplexing). However, there are events (especially uncore events) which do count even though cores are asleep. This patch changes the timer source for multiplexing. It introduces a per-PMU per-cpu hrtimer. The advantage is that even when a core goes idle, it will come back to service the hrtimer, thus multiplexing on system-wide events works much better. The per-PMU implementation (suggested by PeterZ) enables adjusting the multiplexing interval per PMU. The preferred interval is stashed into the struct pmu. If not set, it will be forced to the default interval value. In order to minimize the impact of the hrtimer, it is turned on and off on demand. When the PMU on a CPU is overcommited, the hrtimer is activated. It is stopped when the PMU is not overcommitted. In order for this to work properly, we had to change the order of initialization in start_kernel() such that hrtimer_init() is run before perf_event_init(). The default interval in milliseconds is set to a timer tick just like with the old code. We will provide a sysctl to tune this in another patch. 
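An easy way to see the effect of multiplexing from user space (illustrative only; event names, the number of hardware counters and the exact output format vary by CPU and perf version) is to request more hardware events than the PMU can host in system-wide mode:

  # more events than generic counters -> perf has to multiplex them
  perf stat -a -e cycles,instructions,branches,branch-misses,cache-references,cache-misses sleep 1

perf stat annotates each scaled counter with the fraction of time the event was actually programmed on the PMU; with the hrtimer-based rotation that fraction stays fair for system-wide and uncore events even when cores are mostly idle.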
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Link: http://lkml.kernel.org/r/1364991694-5876-2-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 114 +++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 106 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index e0dcced282e4..97bfac7e6f45 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -170,6 +170,8 @@ int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE; static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ); +static int perf_rotate_context(struct perf_cpu_context *cpuctx); + int perf_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -658,6 +660,98 @@ perf_cgroup_mark_enabled(struct perf_event *event, } #endif +/* + * set default to be dependent on timer tick just + * like original code + */ +#define PERF_CPU_HRTIMER (1000 / HZ) +/* + * function must be called with interrupts disbled + */ +static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr) +{ + struct perf_cpu_context *cpuctx; + enum hrtimer_restart ret = HRTIMER_NORESTART; + int rotations = 0; + + WARN_ON(!irqs_disabled()); + + cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); + + rotations = perf_rotate_context(cpuctx); + + /* + * arm timer if needed + */ + if (rotations) { + hrtimer_forward_now(hr, cpuctx->hrtimer_interval); + ret = HRTIMER_RESTART; + } + + return ret; +} + +/* CPU is going down */ +void perf_cpu_hrtimer_cancel(int cpu) +{ + struct perf_cpu_context *cpuctx; + struct pmu *pmu; + unsigned long flags; + + if (WARN_ON(cpu != smp_processor_id())) + return; + + local_irq_save(flags); + + rcu_read_lock(); + + list_for_each_entry_rcu(pmu, &pmus, entry) { + cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + + if (pmu->task_ctx_nr == perf_sw_context) + continue; + + hrtimer_cancel(&cpuctx->hrtimer); + } + + rcu_read_unlock(); + + local_irq_restore(flags); +} + +static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) +{ + struct hrtimer *hr = &cpuctx->hrtimer; + struct pmu *pmu = cpuctx->ctx.pmu; + + /* no multiplexing needed for SW PMU */ + if (pmu->task_ctx_nr == perf_sw_context) + return; + + cpuctx->hrtimer_interval = + ns_to_ktime(NSEC_PER_MSEC * PERF_CPU_HRTIMER); + + hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); + hr->function = perf_cpu_hrtimer_handler; +} + +static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx) +{ + struct hrtimer *hr = &cpuctx->hrtimer; + struct pmu *pmu = cpuctx->ctx.pmu; + + /* not for SW PMU */ + if (pmu->task_ctx_nr == perf_sw_context) + return; + + if (hrtimer_active(hr)) + return; + + if (!hrtimer_callback_running(hr)) + __hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval, + 0, HRTIMER_MODE_REL_PINNED, 0); +} + void perf_pmu_disable(struct pmu *pmu) { int *count = this_cpu_ptr(pmu->pmu_disable_count); @@ -1506,6 +1600,7 @@ group_sched_in(struct perf_event *group_event, if (event_sched_in(group_event, cpuctx, ctx)) { pmu->cancel_txn(pmu); + perf_cpu_hrtimer_restart(cpuctx); return -EAGAIN; } @@ -1552,6 +1647,8 @@ group_error: pmu->cancel_txn(pmu); + perf_cpu_hrtimer_restart(cpuctx); + return -EAGAIN; } @@ -1807,8 +1904,10 @@ static int __perf_event_enable(void *info) * If this event can't go on and it's part of a * group, then the whole group has to come off. 
*/ - if (leader != event) + if (leader != event) { group_sched_out(leader, cpuctx, ctx); + perf_cpu_hrtimer_restart(cpuctx); + } if (leader->attr.pinned) { update_group_times(leader); leader->state = PERF_EVENT_STATE_ERROR; @@ -2555,7 +2654,7 @@ static void rotate_ctx(struct perf_event_context *ctx) * because they're strictly cpu affine and rotate_start is called with IRQs * disabled, while rotate_context is called from IRQ context. */ -static void perf_rotate_context(struct perf_cpu_context *cpuctx) +static int perf_rotate_context(struct perf_cpu_context *cpuctx) { struct perf_event_context *ctx = NULL; int rotate = 0, remove = 1; @@ -2594,6 +2693,8 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx) done: if (remove) list_del_init(&cpuctx->rotation_list); + + return rotate; } #ifdef CONFIG_NO_HZ_FULL @@ -2625,10 +2726,6 @@ void perf_event_task_tick(void) ctx = cpuctx->task_ctx; if (ctx) perf_adjust_freq_unthr_context(ctx, throttled); - - if (cpuctx->jiffies_interval == 1 || - !(jiffies % cpuctx->jiffies_interval)) - perf_rotate_context(cpuctx); } } @@ -6001,7 +6098,9 @@ skip_type: lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); cpuctx->ctx.type = cpu_context; cpuctx->ctx.pmu = pmu; - cpuctx->jiffies_interval = 1; + + __perf_cpu_hrtimer_init(cpuctx, cpu); + INIT_LIST_HEAD(&cpuctx->rotation_list); cpuctx->unique_pmu = pmu; } @@ -7387,7 +7486,6 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) case CPU_DOWN_PREPARE: perf_event_exit_cpu(cpu); break; - default: break; } -- cgit v1.2.3 From 62b8563979273424d6ebe9201e34d1acc133ad4f Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Wed, 3 Apr 2013 14:21:34 +0200 Subject: perf: Add sysfs entry to adjust multiplexing interval per PMU This patch adds /sys/device/xxx/perf_event_mux_interval_ms to ajust the multiplexing interval per PMU. The unit is milliseconds. Value has to be >= 1. In the 4th version, we renamed the sysfs file to be more consistent with the other /proc/sys/kernel entries for perf_events. In the 5th version, we handle the reprogramming of the hrtimer using hrtimer_forward_now(). That way, we sync up to new timer value quickly (suggested by Jiri Olsa). 
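Usage sketch for the new attribute (the device directory name depends on the PMU; "cpu" is the usual name of the core PMU on x86, and the /sys/bus/event_source path is assumed here as the location where PMU devices are registered):

  # read the current multiplexing interval, in milliseconds
  cat /sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms

  # rotate event groups every 1 ms instead of once per tick
  echo 1 > /sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms

Values below 1 are rejected with -EINVAL, and writing back the current value is a no-op.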
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Link: http://lkml.kernel.org/r/1364991694-5876-3-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 59 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 97bfac7e6f45..53d1b300116a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -723,13 +723,21 @@ static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) { struct hrtimer *hr = &cpuctx->hrtimer; struct pmu *pmu = cpuctx->ctx.pmu; + int timer; /* no multiplexing needed for SW PMU */ if (pmu->task_ctx_nr == perf_sw_context) return; - cpuctx->hrtimer_interval = - ns_to_ktime(NSEC_PER_MSEC * PERF_CPU_HRTIMER); + /* + * check default is sane, if not set then force to + * default interval (1/tick) + */ + timer = pmu->hrtimer_interval_ms; + if (timer < 1) + timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER; + + cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); hr->function = perf_cpu_hrtimer_handler; @@ -6001,9 +6009,56 @@ type_show(struct device *dev, struct device_attribute *attr, char *page) return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); } +static ssize_t +perf_event_mux_interval_ms_show(struct device *dev, + struct device_attribute *attr, + char *page) +{ + struct pmu *pmu = dev_get_drvdata(dev); + + return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); +} + +static ssize_t +perf_event_mux_interval_ms_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pmu *pmu = dev_get_drvdata(dev); + int timer, cpu, ret; + + ret = kstrtoint(buf, 0, &timer); + if (ret) + return ret; + + if (timer < 1) + return -EINVAL; + + /* same value, noting to do */ + if (timer == pmu->hrtimer_interval_ms) + return count; + + pmu->hrtimer_interval_ms = timer; + + /* update all cpuctx for this PMU */ + for_each_possible_cpu(cpu) { + struct perf_cpu_context *cpuctx; + cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); + cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); + + if (hrtimer_active(&cpuctx->hrtimer)) + hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval); + } + + return count; +} + +#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store) + static struct device_attribute pmu_dev_attrs[] = { - __ATTR_RO(type), - __ATTR_NULL, + __ATTR_RO(type), + __ATTR_RW(perf_event_mux_interval_ms), + __ATTR_NULL, }; static int pmu_bus_running; -- cgit v1.2.3 From 2b923c8f5de6722393e614b096d5040b6d4eaf98 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 21 May 2013 12:53:37 +0200 Subject: perf/x86: Check branch sampling priv level in generic code This patch moves commit 7cc23cd to the generic code: perf/x86/intel/lbr: Demand proper privileges for PERF_SAMPLE_BRANCH_KERNEL The check is now implemented in generic code instead of x86 specific code. That way we do not have to repeat the test in each arch supporting branch sampling. 
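For context, the privilege-level bits being checked come from attr->branch_sample_type; the relevant definitions are reproduced here from memory as a reading aid, so treat them as a sketch rather than the authoritative headers:

  /* include/uapi/linux/perf_event.h (abridged) */
  PERF_SAMPLE_BRANCH_USER   = 1U << 0,  /* user branches */
  PERF_SAMPLE_BRANCH_KERNEL = 1U << 1,  /* kernel branches */
  PERF_SAMPLE_BRANCH_HV     = 1U << 2,  /* hypervisor branches */

  /* include/linux/perf_event.h (abridged) */
  #define PERF_SAMPLE_BRANCH_PLM_ALL \
          (PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_HV)
  #define PERF_SAMPLE_BRANCH_PERM_PLM \
          (PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_HV)

Note that the generic check now runs after the priv level has been propagated from the event's exclude_* settings, so it applies to the effective mask even when the user did not request a branch priv level explicitly.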
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Arnaldo Carvalho de Melo Link: http://lkml.kernel.org/r/20130521105337.GA2879@quad Signed-off-by: Ingo Molnar --- kernel/events/core.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 53d1b300116a..a0780b3a3d50 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6481,11 +6481,6 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) return -EINVAL; - /* kernel level capture: check permissions */ - if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM) - && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) - return -EACCES; - /* propagate priv level, when not set for branch */ if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { @@ -6503,6 +6498,10 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, */ attr->branch_sample_type = mask; } + /* kernel level capture: check permissions */ + if ((mask & PERF_SAMPLE_BRANCH_KERNEL) + && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) + return -EACCES; } if (attr->sample_type & PERF_SAMPLE_REGS_USER) { -- cgit v1.2.3 From a6572f84c5b135d9b6df279ed3c8de028bd1edd9 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 17 May 2013 10:31:04 +0800 Subject: watchdog: Disallow setting watchdog_thresh to -1 In old kernels, it's allowed to set softlockup_thresh to -1 or 0 to disable softlockup detection. However watchdog_thresh only uses 0 to disable detection, and setting it to -1 just froze my box and nothing I can do but reboot. Signed-off-by: Li Zefan Acked-by: Don Zickus Link: http://lkml.kernel.org/r/51959668.9040106@huawei.com Signed-off-by: Ingo Molnar --- kernel/sysctl.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 9edcf456e0fc..b0a1f99907f3 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -120,7 +120,6 @@ extern int blk_iopoll_enabled; /* Constants used for minimum and maximum */ #ifdef CONFIG_LOCKUP_DETECTOR static int sixty = 60; -static int neg_one = -1; #endif static int zero; @@ -814,7 +813,7 @@ static struct ctl_table kern_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dowatchdog, - .extra1 = &neg_one, + .extra1 = &zero, .extra2 = &sixty, }, { -- cgit v1.2.3 From e712209a9e0b70e78b13847738eb66fe37412515 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 6 Jun 2013 11:02:04 +0200 Subject: perf: Fix hypervisor branch sampling permission check Commit 2b923c8 perf/x86: Check branch sampling priv level in generic code was missing the check for the hypervisor (HV) priv level, so add it back. With this patch, we get the following correct behavior: # echo 2 >/proc/sys/kernel/perf_event_paranoid $ perf record -j any,k noploop 1 Error: You may not have permission to collect stats. Consider tweaking /proc/sys/kernel/perf_event_paranoid: -1 - Not paranoid at all 0 - Disallow raw tracepoint access for unpriv 1 - Disallow cpu events for unpriv 2 - Disallow kernel profiling for unpriv $ perf record -j any,hv noploop 1 Error: You may not have permission to collect stats. 
Consider tweaking /proc/sys/kernel/perf_event_paranoid: -1 - Not paranoid at all 0 - Disallow raw tracepoint access for unpriv 1 - Disallow cpu events for unpriv 2 - Disallow kernel profiling for unpriv Signed-off-by: Stephane Eranian Acked-by: Petr Matousek Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20130606090204.GA3725@quad Signed-off-by: Ingo Molnar --- kernel/events/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index d0e0d0d2025f..aca95bce34c8 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6573,8 +6573,8 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, */ attr->branch_sample_type = mask; } - /* kernel level capture: check permissions */ - if ((mask & PERF_SAMPLE_BRANCH_KERNEL) + /* privileged levels capture (kernel, hv): check permissions */ + if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM) && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) return -EACCES; } -- cgit v1.2.3 From 03d8e80beb7db78a13c192431205b9c83f7e0cd1 Mon Sep 17 00:00:00 2001 From: Mischa Jonker Date: Tue, 4 Jun 2013 11:45:48 +0200 Subject: perf: Add const qualifier to perf_pmu_register's 'name' arg This allows us to use pdev->name for registering a PMU device. IMO the name is not supposed to be changed anyway. Signed-off-by: Mischa Jonker Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1370339148-5566-1-git-send-email-mjonker@synopsys.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index aca95bce34c8..9c8920783317 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6179,7 +6179,7 @@ free_dev: static struct lock_class_key cpuctx_mutex; static struct lock_class_key cpuctx_lock; -int perf_pmu_register(struct pmu *pmu, char *name, int type) +int perf_pmu_register(struct pmu *pmu, const char *name, int type) { int cpu, ret; -- cgit v1.2.3 From e1ebe86203e6532eb5a0ae8f26ccae47aca548ae Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 20 Jun 2013 17:50:11 +0200 Subject: hw_breakpoint: Simplify list/idx mess in toggle_bp_slot() paths The enable/disable logic in toggle_bp_slot() is not symmetrical and imho very confusing. "old_count" in toggle_bp_task_slot() is actually new_count because this bp was already removed from the list. Change toggle_bp_slot() to always call list_add/list_del after toggle_bp_task_slot(). This way old_idx is task_bp_pinned() and this entry should be decremented, new_idx is +/-weight and we need to increment this element. The code/logic looks obvious. 
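A worked example may make the indexing clearer (numbers are illustrative): suppose a task already owns two pinned breakpoints relevant to this cpu and a third one is being added with weight == 1:

  /*
   * tsk_pinned[n-1] counts how many tasks own exactly n breakpoints.
   *
   *   old_idx = task_bp_pinned(cpu, bp, type) - 1;   -->  1  ("2 breakpoints" slot)
   *   new_idx = old_idx + weight;                    -->  2  ("3 breakpoints" slot)
   *
   *   tsk_pinned[old_idx]--;   one task fewer with exactly 2 breakpoints
   *   tsk_pinned[new_idx]++;   one task more with exactly 3 breakpoints
   */

Because list_add()/list_del() now happen after toggle_bp_task_slot(), task_bp_pinned() reflects the pre-toggle count in both the add and the remove path.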
Reported-by: Vince Weaver Signed-off-by: Oleg Nesterov Acked-by: Frederic Weisbecker Link: http://lkml.kernel.org/r/20130620155011.GA6330@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/hw_breakpoint.c | 40 ++++++++++++++++------------------------ 1 file changed, 16 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index ef8ebe560949..dee0148dcf54 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -185,26 +185,20 @@ fetch_this_slot(struct bp_busy_slots *slots, int weight) static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable, enum bp_type_idx type, int weight) { - unsigned int *tsk_pinned; - int old_count = 0; - int old_idx = 0; - int idx = 0; - - old_count = task_bp_pinned(cpu, bp, type); - old_idx = old_count - 1; - idx = old_idx + weight; - - /* tsk_pinned[n] is the number of tasks having n breakpoints */ - tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); - if (enable) { - tsk_pinned[idx]++; - if (old_count > 0) - tsk_pinned[old_idx]--; - } else { - tsk_pinned[idx]--; - if (old_count > 0) - tsk_pinned[old_idx]++; - } + /* tsk_pinned[n-1] is the number of tasks having n>0 breakpoints */ + unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); + int old_idx, new_idx; + + old_idx = task_bp_pinned(cpu, bp, type) - 1; + if (enable) + new_idx = old_idx + weight; + else + new_idx = old_idx - weight; + + if (old_idx >= 0) + tsk_pinned[old_idx]--; + if (new_idx >= 0) + tsk_pinned[new_idx]++; } /* @@ -228,10 +222,6 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, } /* Pinned counter task profiling */ - - if (!enable) - list_del(&bp->hw.bp_list); - if (cpu >= 0) { toggle_bp_task_slot(bp, cpu, enable, type, weight); } else { @@ -241,6 +231,8 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, if (enable) list_add_tail(&bp->hw.bp_list, &bp_task_head); + else + list_del(&bp->hw.bp_list); } /* -- cgit v1.2.3 From 7ab71f3244e9f970c29566c5a67e13d1fa38c387 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 20 Jun 2013 17:50:13 +0200 Subject: hw_breakpoint: Simplify the "weight" usage in toggle_bp_slot() paths Change toggle_bp_slot() to make "weight" negative if !enable. This way we can always use "+ weight" without additional "if (enable)" check and toggle_bp_task_slot() no longer needs this arg. 
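Continuing the example from the previous patch, removal now goes through exactly the same arithmetic, only with the weight negated (numbers illustrative):

  /*
   * Removing that third breakpoint: toggle_bp_slot() flips weight to -1,
   * and bp is still on bp_task_head when task_bp_pinned() runs.
   *
   *   old_idx = task_bp_pinned(cpu, bp, type) - 1;   -->  2
   *   new_idx = old_idx + weight;                    -->  1
   *
   *   tsk_pinned[2]--;
   *   tsk_pinned[1]++;
   */

The per-cpu pinned case is simplified the same way: a single "per_cpu(nr_cpu_bp_pinned[type], cpu) += weight" now covers both enable and disable.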
Reported-by: Vince Weaver Signed-off-by: Oleg Nesterov Acked-by: Frederic Weisbecker Link: http://lkml.kernel.org/r/20130620155013.GA6337@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/hw_breakpoint.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index dee0148dcf54..5cd4f6d9652c 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -182,7 +182,7 @@ fetch_this_slot(struct bp_busy_slots *slots, int weight) /* * Add a pinned breakpoint for the given task in our constraint table */ -static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable, +static void toggle_bp_task_slot(struct perf_event *bp, int cpu, enum bp_type_idx type, int weight) { /* tsk_pinned[n-1] is the number of tasks having n>0 breakpoints */ @@ -190,10 +190,7 @@ static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable, int old_idx, new_idx; old_idx = task_bp_pinned(cpu, bp, type) - 1; - if (enable) - new_idx = old_idx + weight; - else - new_idx = old_idx - weight; + new_idx = old_idx + weight; if (old_idx >= 0) tsk_pinned[old_idx]--; @@ -211,22 +208,21 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, int cpu = bp->cpu; struct task_struct *tsk = bp->hw.bp_target; + if (!enable) + weight = -weight; + /* Pinned counter cpu profiling */ if (!tsk) { - - if (enable) - per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight; - else - per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight; + per_cpu(nr_cpu_bp_pinned[type], cpu) += weight; return; } /* Pinned counter task profiling */ if (cpu >= 0) { - toggle_bp_task_slot(bp, cpu, enable, type, weight); + toggle_bp_task_slot(bp, cpu, type, weight); } else { for_each_possible_cpu(cpu) - toggle_bp_task_slot(bp, cpu, enable, type, weight); + toggle_bp_task_slot(bp, cpu, type, weight); } if (enable) -- cgit v1.2.3 From 1c10adbb929936316f71df089ace699fce037e24 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 20 Jun 2013 17:50:15 +0200 Subject: hw_breakpoint: Introduce cpumask_of_bp() Add the trivial helper which simply returns cpumask_of() or cpu_possible_mask depending on bp->cpu. Change fetch_bp_busy_slots() and toggle_bp_slot() to always do for_each_cpu(cpumask_of_bp) to simplify the code and avoid the code duplication. Reported-by: Vince Weaver Signed-off-by: Oleg Nesterov Acked-by: Frederic Weisbecker Link: http://lkml.kernel.org/r/20130620155015.GA6340@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/hw_breakpoint.c | 43 +++++++++++++++++-------------------------- 1 file changed, 17 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 5cd4f6d9652c..9c71445328af 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -127,6 +127,13 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type) return count; } +static const struct cpumask *cpumask_of_bp(struct perf_event *bp) +{ + if (bp->cpu >= 0) + return cpumask_of(bp->cpu); + return cpu_possible_mask; +} + /* * Report the number of pinned/un-pinned breakpoints we have in * a given cpu (cpu > -1) or in all of them (cpu = -1). 
@@ -135,25 +142,13 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, enum bp_type_idx type) { - int cpu = bp->cpu; - struct task_struct *tsk = bp->hw.bp_target; - - if (cpu >= 0) { - slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu); - if (!tsk) - slots->pinned += max_task_bp_pinned(cpu, type); - else - slots->pinned += task_bp_pinned(cpu, bp, type); - slots->flexible = per_cpu(nr_bp_flexible[type], cpu); - - return; - } + const struct cpumask *cpumask = cpumask_of_bp(bp); + int cpu; - for_each_possible_cpu(cpu) { - unsigned int nr; + for_each_cpu(cpu, cpumask) { + unsigned int nr = per_cpu(nr_cpu_bp_pinned[type], cpu); - nr = per_cpu(nr_cpu_bp_pinned[type], cpu); - if (!tsk) + if (!bp->hw.bp_target) nr += max_task_bp_pinned(cpu, type); else nr += task_bp_pinned(cpu, bp, type); @@ -205,25 +200,21 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, int weight) { - int cpu = bp->cpu; - struct task_struct *tsk = bp->hw.bp_target; + const struct cpumask *cpumask = cpumask_of_bp(bp); + int cpu; if (!enable) weight = -weight; /* Pinned counter cpu profiling */ - if (!tsk) { - per_cpu(nr_cpu_bp_pinned[type], cpu) += weight; + if (!bp->hw.bp_target) { + per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight; return; } /* Pinned counter task profiling */ - if (cpu >= 0) { + for_each_cpu(cpu, cpumask) toggle_bp_task_slot(bp, cpu, type, weight); - } else { - for_each_possible_cpu(cpu) - toggle_bp_task_slot(bp, cpu, type, weight); - } if (enable) list_add_tail(&bp->hw.bp_list, &bp_task_head); -- cgit v1.2.3 From e12cbc10cb27fcbe51b5f68e2015138dc451a2eb Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 20 Jun 2013 17:50:18 +0200 Subject: hw_breakpoint: Simplify *register_wide_hw_breakpoint() 1. register_wide_hw_breakpoint() can use unregister_ if failure, no need to duplicate the code. 2. "struct perf_event **pevent" adds the unnecesary lever of indirection and complication, use per_cpu(*cpu_events, cpu). 
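For reference, a typical caller of the wide (all-cpus) API looks roughly like this; a sketch loosely modeled on samples/hw_breakpoint/data_breakpoint.c, with the watched symbol and breakpoint parameters as placeholders:

  #include <linux/module.h>
  #include <linux/kallsyms.h>
  #include <linux/perf_event.h>
  #include <linux/hw_breakpoint.h>

  static struct perf_event * __percpu *wp;

  static void wp_handler(struct perf_event *bp, struct perf_sample_data *data,
                         struct pt_regs *regs)
  {
          pr_info("watched data was accessed\n");
  }

  static int __init wp_init(void)
  {
          struct perf_event_attr attr;

          hw_breakpoint_init(&attr);
          attr.bp_addr = kallsyms_lookup_name("jiffies");  /* placeholder target */
          attr.bp_len  = HW_BREAKPOINT_LEN_4;
          attr.bp_type = HW_BREAKPOINT_W;

          wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
          if (IS_ERR((void __force *)wp))
                  return PTR_ERR((void __force *)wp);
          return 0;
  }

  static void __exit wp_exit(void)
  {
          unregister_wide_hw_breakpoint(wp);
  }

  module_init(wp_init);
  module_exit(wp_exit);
  MODULE_LICENSE("GPL");

On failure the partially filled per-cpu array is simply handed to unregister_wide_hw_breakpoint(); entries for cpus that were never reached stay NULL from alloc_percpu().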
Reported-by: Vince Weaver Signed-off-by: Oleg Nesterov Acked-by: Frederic Weisbecker Link: http://lkml.kernel.org/r/20130620155018.GA6347@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/hw_breakpoint.c | 34 +++++++++++----------------------- 1 file changed, 11 insertions(+), 23 deletions(-) (limited to 'kernel') diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 9c71445328af..38418f786f36 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -497,8 +497,8 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context) { - struct perf_event * __percpu *cpu_events, **pevent, *bp; - long err; + struct perf_event * __percpu *cpu_events, *bp; + long err = 0; int cpu; cpu_events = alloc_percpu(typeof(*cpu_events)); @@ -507,31 +507,21 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr, get_online_cpus(); for_each_online_cpu(cpu) { - pevent = per_cpu_ptr(cpu_events, cpu); bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered, context); - - *pevent = bp; - if (IS_ERR(bp)) { err = PTR_ERR(bp); - goto fail; + break; } - } - put_online_cpus(); - - return cpu_events; -fail: - for_each_online_cpu(cpu) { - pevent = per_cpu_ptr(cpu_events, cpu); - if (IS_ERR(*pevent)) - break; - unregister_hw_breakpoint(*pevent); + per_cpu(*cpu_events, cpu) = bp; } put_online_cpus(); - free_percpu(cpu_events); + if (likely(!err)) + return cpu_events; + + unregister_wide_hw_breakpoint(cpu_events); return (void __percpu __force *)ERR_PTR(err); } EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint); @@ -543,12 +533,10 @@ EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint); void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { int cpu; - struct perf_event **pevent; - for_each_possible_cpu(cpu) { - pevent = per_cpu_ptr(cpu_events, cpu); - unregister_hw_breakpoint(*pevent); - } + for_each_possible_cpu(cpu) + unregister_hw_breakpoint(per_cpu(*cpu_events, cpu)); + free_percpu(cpu_events); } EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint); -- cgit v1.2.3 From bde96030f438b5eb6fb74f3bdd06d9f68bb3ba00 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 20 Jun 2013 17:50:20 +0200 Subject: hw_breakpoint: Introduce "struct bp_cpuinfo" This patch simply moves all per-cpu variables into the new single per-cpu "struct bp_cpuinfo". To me this looks more logical and clean, but this can also simplify the further potential changes. In particular, I do not think this memory should be per-cpu, it is never used "locally". After this change it is trivial to turn it into, say, bootmem[nr_cpu_ids]. 
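At a glance, the change in data layout (paraphrased from the hunks below):

  /* before: three parallel per-cpu arrays, each indexed by bp type */
  DEFINE_PER_CPU(unsigned int,   nr_cpu_bp_pinned[TYPE_MAX]);
  DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);
  DEFINE_PER_CPU(unsigned int,   nr_bp_flexible[TYPE_MAX]);

  /* after: the same three counters grouped into one structure */
  DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);

with get_bp_info(cpu, type) as the single accessor replacing the scattered per_cpu() lookups.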
Reported-by: Vince Weaver Signed-off-by: Oleg Nesterov Acked-by: Frederic Weisbecker Link: http://lkml.kernel.org/r/20130620155020.GA6350@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/hw_breakpoint.c | 69 ++++++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 33 deletions(-) (limited to 'kernel') diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 38418f786f36..1559fb0b9296 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -46,23 +46,26 @@ #include #include - - /* * Constraints data */ +struct bp_cpuinfo { + /* Number of pinned cpu breakpoints in a cpu */ + unsigned int cpu_pinned; + /* tsk_pinned[n] is the number of tasks having n+1 breakpoints */ + unsigned int *tsk_pinned; + /* Number of non-pinned cpu/task breakpoints in a cpu */ + unsigned int flexible; /* XXX: placeholder, see fetch_this_slot() */ +}; -/* Number of pinned cpu breakpoints in a cpu */ -static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]); - -/* Number of pinned task breakpoints in a cpu */ -static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]); - -/* Number of non-pinned cpu/task breakpoints in a cpu */ -static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]); - +static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]); static int nr_slots[TYPE_MAX]; +static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type) +{ + return per_cpu_ptr(bp_cpuinfo + type, cpu); +} + /* Keep track of the breakpoints attached to tasks */ static LIST_HEAD(bp_task_head); @@ -96,8 +99,8 @@ static inline enum bp_type_idx find_slot_idx(struct perf_event *bp) */ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) { + unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned; int i; - unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); for (i = nr_slots[type] - 1; i >= 0; i--) { if (tsk_pinned[i] > 0) @@ -146,8 +149,10 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, int cpu; for_each_cpu(cpu, cpumask) { - unsigned int nr = per_cpu(nr_cpu_bp_pinned[type], cpu); + struct bp_cpuinfo *info = get_bp_info(cpu, type); + int nr; + nr = info->cpu_pinned; if (!bp->hw.bp_target) nr += max_task_bp_pinned(cpu, type); else @@ -156,8 +161,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, if (nr > slots->pinned) slots->pinned = nr; - nr = per_cpu(nr_bp_flexible[type], cpu); - + nr = info->flexible; if (nr > slots->flexible) slots->flexible = nr; } @@ -180,8 +184,7 @@ fetch_this_slot(struct bp_busy_slots *slots, int weight) static void toggle_bp_task_slot(struct perf_event *bp, int cpu, enum bp_type_idx type, int weight) { - /* tsk_pinned[n-1] is the number of tasks having n>0 breakpoints */ - unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); + unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned; int old_idx, new_idx; old_idx = task_bp_pinned(cpu, bp, type) - 1; @@ -208,7 +211,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, /* Pinned counter cpu profiling */ if (!bp->hw.bp_target) { - per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight; + get_bp_info(bp->cpu, type)->cpu_pinned += weight; return; } @@ -240,8 +243,8 @@ __weak void arch_unregister_hw_breakpoint(struct perf_event *bp) * * - If attached to a single cpu, check: * - * (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu) - * + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM + * (per_cpu(info->flexible, cpu) || 
(per_cpu(info->cpu_pinned, cpu) + * + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM * * -> If there are already non-pinned counters in this cpu, it means * there is already a free slot for them. @@ -251,8 +254,8 @@ __weak void arch_unregister_hw_breakpoint(struct perf_event *bp) * * - If attached to every cpus, check: * - * (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *)) - * + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM + * (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *)) + * + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM * * -> This is roughly the same, except we check the number of per cpu * bp for every cpu and we keep the max one. Same for the per tasks @@ -263,16 +266,16 @@ __weak void arch_unregister_hw_breakpoint(struct perf_event *bp) * * - If attached to a single cpu, check: * - * ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu) - * + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM + * ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu) + * + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM * - * -> Same checks as before. But now the nr_bp_flexible, if any, must keep + * -> Same checks as before. But now the info->flexible, if any, must keep * one register at least (or they will never be fed). * * - If attached to every cpus, check: * - * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *)) - * + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM + * ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *)) + * + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM */ static int __reserve_bp_slot(struct perf_event *bp) { @@ -622,7 +625,6 @@ static struct pmu perf_breakpoint = { int __init init_hw_breakpoint(void) { - unsigned int **task_bp_pinned; int cpu, err_cpu; int i; @@ -631,10 +633,11 @@ int __init init_hw_breakpoint(void) for_each_possible_cpu(cpu) { for (i = 0; i < TYPE_MAX; i++) { - task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu); - *task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i], - GFP_KERNEL); - if (!*task_bp_pinned) + struct bp_cpuinfo *info = get_bp_info(cpu, i); + + info->tsk_pinned = kcalloc(nr_slots[i], sizeof(int), + GFP_KERNEL); + if (!info->tsk_pinned) goto err_alloc; } } @@ -648,7 +651,7 @@ int __init init_hw_breakpoint(void) err_alloc: for_each_possible_cpu(err_cpu) { for (i = 0; i < TYPE_MAX; i++) - kfree(per_cpu(nr_task_bp_pinned[i], err_cpu)); + kfree(get_bp_info(err_cpu, i)->tsk_pinned); if (err_cpu == cpu) break; } -- cgit v1.2.3 From 14c63f17b1fde5a575a28e96547a22b451c71fb5 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 21 Jun 2013 08:51:36 -0700 Subject: perf: Drop sample rate when sampling is too slow This patch keeps track of how long perf's NMI handler is taking, and also calculates how many samples perf can take a second. If the sample length times the expected max number of samples exceeds a configurable threshold, it drops the sample rate. This way, we don't have a runaway sampling process eating up the CPU. This patch can tend to drop the sample rate down to level where perf doesn't work very well. *BUT* the alternative is that my system hangs because it spends all of its time handling NMIs. I'll take a busted performance tool over an entire system that's busted and undebuggable any day. BTW, my suspicion is that there's still an underlying bug here. Using the HPET instead of the TSC is definitely a contributing factor, but I suspect there are some other things going on. 
But, I can't go dig down on a bug like that with my machine hanging all the time. Signed-off-by: Dave Hansen Acked-by: Peter Zijlstra Cc: paulus@samba.org Cc: acme@ghostprotocols.net Cc: Dave Hansen [ Prettified it a bit. ] Signed-off-by: Ingo Molnar --- kernel/events/core.c | 92 +++++++++++++++++++++++++++++++++++++++++++++++++--- kernel/sysctl.c | 9 +++++ 2 files changed, 97 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 9c8920783317..1db3af933704 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -165,10 +165,26 @@ int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' /* * max perf event sample rate */ -#define DEFAULT_MAX_SAMPLE_RATE 100000 -int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE; -static int max_samples_per_tick __read_mostly = - DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ); +#define DEFAULT_MAX_SAMPLE_RATE 100000 +#define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE) +#define DEFAULT_CPU_TIME_MAX_PERCENT 25 + +int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE; + +static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ); +static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS; + +static atomic_t perf_sample_allowed_ns __read_mostly = + ATOMIC_INIT( DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100); + +void update_perf_cpu_limits(void) +{ + u64 tmp = perf_sample_period_ns; + + tmp *= sysctl_perf_cpu_time_max_percent; + tmp = do_div(tmp, 100); + atomic_set(&perf_sample_allowed_ns, tmp); +} static int perf_rotate_context(struct perf_cpu_context *cpuctx); @@ -182,10 +198,78 @@ int perf_proc_update_handler(struct ctl_table *table, int write, return ret; max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); + perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; + update_perf_cpu_limits(); return 0; } +int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT; + +int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (ret || !write) + return ret; + + update_perf_cpu_limits(); + + return 0; +} + +/* + * perf samples are done in some very critical code paths (NMIs). + * If they take too much CPU time, the system can lock up and not + * get any real work done. This will drop the sample rate when + * we detect that events are taking too long. + */ +#define NR_ACCUMULATED_SAMPLES 128 +DEFINE_PER_CPU(u64, running_sample_length); + +void perf_sample_event_took(u64 sample_len_ns) +{ + u64 avg_local_sample_len; + u64 local_samples_len = __get_cpu_var(running_sample_length); + + if (atomic_read(&perf_sample_allowed_ns) == 0) + return; + + /* decay the counter by 1 average sample */ + local_samples_len = __get_cpu_var(running_sample_length); + local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES; + local_samples_len += sample_len_ns; + __get_cpu_var(running_sample_length) = local_samples_len; + + /* + * note: this will be biased artifically low until we have + * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us + * from having to maintain a count. 
+ */ + avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES; + + if (avg_local_sample_len <= atomic_read(&perf_sample_allowed_ns)) + return; + + if (max_samples_per_tick <= 1) + return; + + max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2); + sysctl_perf_event_sample_rate = max_samples_per_tick * HZ; + perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; + + printk_ratelimited(KERN_WARNING + "perf samples too long (%lld > %d), lowering " + "kernel.perf_event_max_sample_rate to %d\n", + avg_local_sample_len, + atomic_read(&perf_sample_allowed_ns), + sysctl_perf_event_sample_rate); + + update_perf_cpu_limits(); +} + static atomic64_t perf_event_id; static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, diff --git a/kernel/sysctl.c b/kernel/sysctl.c index b0a1f99907f3..4ce13c3cedb9 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1043,6 +1043,15 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = perf_proc_update_handler, }, + { + .procname = "perf_cpu_time_max_percent", + .data = &sysctl_perf_cpu_time_max_percent, + .maxlen = sizeof(sysctl_perf_cpu_time_max_percent), + .mode = 0644, + .proc_handler = perf_cpu_time_max_percent_handler, + .extra1 = &zero, + .extra2 = &one_hundred, + }, #endif #ifdef CONFIG_KMEMCHECK { -- cgit v1.2.3
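Usage sketch for the new knob (values illustrative):

  # let perf's NMI-time sampling use at most 10% of CPU time before the
  # kernel starts lowering the sample rate on its own
  echo 10 > /proc/sys/kernel/perf_cpu_time_max_percent

  # the limit it adjusts; lowered automatically (with a ratelimited
  # warning in dmesg) whenever the budget above is exceeded
  cat /proc/sys/kernel/perf_event_max_sample_rate

As the early return in perf_sample_event_took() suggests, writing 0 to perf_cpu_time_max_percent effectively disables the automatic throttling.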