Diffstat (limited to 'kernel')
-rw-r--r--  kernel/hrtimer.c                 2
-rw-r--r--  kernel/perf_event.c            282
-rw-r--r--  kernel/time/tick-sched.c         9
-rw-r--r--  kernel/trace/blktrace.c         39
-rw-r--r--  kernel/trace/ftrace.c            4
-rw-r--r--  kernel/trace/kmemtrace.c         2
-rw-r--r--  kernel/trace/trace_syscalls.c    4
7 files changed, 248 insertions, 94 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 6d7020490f94..3e1c36e7998f 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -726,8 +726,6 @@ static int hrtimer_switch_to_hres(void)
/* "Retrigger" the interrupt to get things going */
retrigger_next_event(NULL);
local_irq_restore(flags);
- printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
- smp_processor_id());
return 1;
}
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 0f86feb6db0c..9d0b5c665883 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -20,6 +20,7 @@
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
+#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
@@ -1030,14 +1031,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
update_context_time(ctx);
perf_disable();
- if (ctx->nr_active) {
- list_for_each_entry(event, &ctx->group_list, group_entry) {
- if (event != event->group_leader)
- event_sched_out(event, cpuctx, ctx);
- else
- group_sched_out(event, cpuctx, ctx);
- }
- }
+ if (ctx->nr_active)
+ list_for_each_entry(event, &ctx->group_list, group_entry)
+ group_sched_out(event, cpuctx, ctx);
+
perf_enable();
out:
spin_unlock(&ctx->lock);
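
The special cases removed above are possible because ctx->group_list now holds only group leaders, and group_sched_out() already degenerates to a single event_sched_out() when a leader has no siblings. For orientation, a rough sketch of that helper as it stands in this era of kernel/perf_event.c (reconstructed, so treat the details as approximate):

    static void
    group_sched_out(struct perf_event *group_event,
                    struct perf_cpu_context *cpuctx,
                    struct perf_event_context *ctx)
    {
        struct perf_event *event;

        if (group_event->state != PERF_EVENT_STATE_ACTIVE)
            return;

        event_sched_out(group_event, cpuctx, ctx);

        /* then schedule out the siblings, if any */
        list_for_each_entry(event, &group_event->sibling_list, group_entry)
            event_sched_out(event, cpuctx, ctx);
    }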
@@ -1258,12 +1255,8 @@ __perf_event_sched_in(struct perf_event_context *ctx,
if (event->cpu != -1 && event->cpu != cpu)
continue;
- if (event != event->group_leader)
- event_sched_in(event, cpuctx, ctx, cpu);
- else {
- if (group_can_go_on(event, cpuctx, 1))
- group_sched_in(event, cpuctx, ctx, cpu);
- }
+ if (group_can_go_on(event, cpuctx, 1))
+ group_sched_in(event, cpuctx, ctx, cpu);
/*
* If this pinned group hasn't been scheduled,
@@ -1291,15 +1284,9 @@ __perf_event_sched_in(struct perf_event_context *ctx,
if (event->cpu != -1 && event->cpu != cpu)
continue;
- if (event != event->group_leader) {
- if (event_sched_in(event, cpuctx, ctx, cpu))
+ if (group_can_go_on(event, cpuctx, can_add_hw))
+ if (group_sched_in(event, cpuctx, ctx, cpu))
can_add_hw = 0;
- } else {
- if (group_can_go_on(event, cpuctx, can_add_hw)) {
- if (group_sched_in(event, cpuctx, ctx, cpu))
- can_add_hw = 0;
- }
- }
}
perf_enable();
out:
@@ -2105,49 +2092,31 @@ unlock:
rcu_read_unlock();
}
-static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static unsigned long perf_data_size(struct perf_mmap_data *data)
{
- struct perf_event *event = vma->vm_file->private_data;
- struct perf_mmap_data *data;
- int ret = VM_FAULT_SIGBUS;
-
- if (vmf->flags & FAULT_FLAG_MKWRITE) {
- if (vmf->pgoff == 0)
- ret = 0;
- return ret;
- }
-
- rcu_read_lock();
- data = rcu_dereference(event->data);
- if (!data)
- goto unlock;
-
- if (vmf->pgoff == 0) {
- vmf->page = virt_to_page(data->user_page);
- } else {
- int nr = vmf->pgoff - 1;
-
- if ((unsigned)nr > data->nr_pages)
- goto unlock;
+ return data->nr_pages << (PAGE_SHIFT + data->data_order);
+}
- if (vmf->flags & FAULT_FLAG_WRITE)
- goto unlock;
+#ifndef CONFIG_PERF_USE_VMALLOC
- vmf->page = virt_to_page(data->data_pages[nr]);
- }
+/*
+ * Back perf_mmap() with regular GFP_KERNEL-0 pages.
+ */
- get_page(vmf->page);
- vmf->page->mapping = vma->vm_file->f_mapping;
- vmf->page->index = vmf->pgoff;
+static struct page *
+perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
+{
+ if (pgoff > data->nr_pages)
+ return NULL;
- ret = 0;
-unlock:
- rcu_read_unlock();
+ if (pgoff == 0)
+ return virt_to_page(data->user_page);
- return ret;
+ return virt_to_page(data->data_pages[pgoff - 1]);
}
-static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
+static struct perf_mmap_data *
+perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
{
struct perf_mmap_data *data;
unsigned long size;
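
With data_order in the picture, perf_data_size() becomes the one place that knows how big the buffer really is. A minimal user-space sketch of the same arithmetic (assuming 4 KiB pages, i.e. PAGE_SHIFT = 12) shows the two backends agreeing on the size:

    #include <stdio.h>

    #define PAGE_SHIFT 12  /* assumption: 4 KiB pages */

    static unsigned long data_size(unsigned long nr_pages, int data_order)
    {
        return nr_pages << (PAGE_SHIFT + data_order);
    }

    int main(void)
    {
        printf("%lu\n", data_size(8, 0)); /* page backend: 8 order-0 pages -> 32768 */
        printf("%lu\n", data_size(1, 3)); /* vmalloc backend: 1 chunk of order 3 -> 32768 */
        return 0;
    }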
@@ -2172,19 +2141,10 @@ static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
goto fail_data_pages;
}
+ data->data_order = 0;
data->nr_pages = nr_pages;
- atomic_set(&data->lock, -1);
-
- if (event->attr.watermark) {
- data->watermark = min_t(long, PAGE_SIZE * nr_pages,
- event->attr.wakeup_watermark);
- }
- if (!data->watermark)
- data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);
- rcu_assign_pointer(event->data, data);
-
- return 0;
+ return data;
fail_data_pages:
for (i--; i >= 0; i--)
@@ -2196,7 +2156,7 @@ fail_user_page:
kfree(data);
fail:
- return -ENOMEM;
+ return NULL;
}
static void perf_mmap_free_page(unsigned long addr)
@@ -2207,28 +2167,169 @@ static void perf_mmap_free_page(unsigned long addr)
__free_page(page);
}
-static void __perf_mmap_data_free(struct rcu_head *rcu_head)
+static void perf_mmap_data_free(struct perf_mmap_data *data)
{
- struct perf_mmap_data *data;
int i;
- data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
-
perf_mmap_free_page((unsigned long)data->user_page);
for (i = 0; i < data->nr_pages; i++)
perf_mmap_free_page((unsigned long)data->data_pages[i]);
+}
+
+#else
+
+/*
+ * Back perf_mmap() with vmalloc memory.
+ *
+ * Required for architectures that have d-cache aliasing issues.
+ */
+
+static struct page *
+perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
+{
+ if (pgoff > (1UL << data->data_order))
+ return NULL;
+
+ return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
+}
+
+static void perf_mmap_unmark_page(void *addr)
+{
+ struct page *page = vmalloc_to_page(addr);
+
+ page->mapping = NULL;
+}
+
+static void perf_mmap_data_free_work(struct work_struct *work)
+{
+ struct perf_mmap_data *data;
+ void *base;
+ int i, nr;
+
+ data = container_of(work, struct perf_mmap_data, work);
+ nr = 1 << data->data_order;
+
+ base = data->user_page;
+ for (i = 0; i < nr + 1; i++)
+ perf_mmap_unmark_page(base + (i * PAGE_SIZE));
+
+ vfree(base);
+}
+static void perf_mmap_data_free(struct perf_mmap_data *data)
+{
+ schedule_work(&data->work);
+}
+
+static struct perf_mmap_data *
+perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
+{
+ struct perf_mmap_data *data;
+ unsigned long size;
+ void *all_buf;
+
+ WARN_ON(atomic_read(&event->mmap_count));
+
+ size = sizeof(struct perf_mmap_data);
+ size += sizeof(void *);
+
+ data = kzalloc(size, GFP_KERNEL);
+ if (!data)
+ goto fail;
+
+ INIT_WORK(&data->work, perf_mmap_data_free_work);
+
+ all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
+ if (!all_buf)
+ goto fail_all_buf;
+
+ data->user_page = all_buf;
+ data->data_pages[0] = all_buf + PAGE_SIZE;
+ data->data_order = ilog2(nr_pages);
+ data->nr_pages = 1;
+
+ return data;
+
+fail_all_buf:
+ kfree(data);
+
+fail:
+ return NULL;
+}
+
+#endif
+
+static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct perf_event *event = vma->vm_file->private_data;
+ struct perf_mmap_data *data;
+ int ret = VM_FAULT_SIGBUS;
+
+ if (vmf->flags & FAULT_FLAG_MKWRITE) {
+ if (vmf->pgoff == 0)
+ ret = 0;
+ return ret;
+ }
+
+ rcu_read_lock();
+ data = rcu_dereference(event->data);
+ if (!data)
+ goto unlock;
+
+ if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
+ goto unlock;
+
+ vmf->page = perf_mmap_to_page(data, vmf->pgoff);
+ if (!vmf->page)
+ goto unlock;
+
+ get_page(vmf->page);
+ vmf->page->mapping = vma->vm_file->f_mapping;
+ vmf->page->index = vmf->pgoff;
+
+ ret = 0;
+unlock:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void
+perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
+{
+ long max_size = perf_data_size(data);
+
+ atomic_set(&data->lock, -1);
+
+ if (event->attr.watermark) {
+ data->watermark = min_t(long, max_size,
+ event->attr.wakeup_watermark);
+ }
+
+ if (!data->watermark)
+ data->watermark = max_t(long, PAGE_SIZE, max_size / 2);
+
+ rcu_assign_pointer(event->data, data);
+}
+
+static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
+{
+ struct perf_mmap_data *data;
+
+ data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
+ perf_mmap_data_free(data);
kfree(data);
}
-static void perf_mmap_data_free(struct perf_event *event)
+static void perf_mmap_data_release(struct perf_event *event)
{
struct perf_mmap_data *data = event->data;
WARN_ON(atomic_read(&event->mmap_count));
rcu_assign_pointer(event->data, NULL);
- call_rcu(&data->rcu_head, __perf_mmap_data_free);
+ call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
}
static void perf_mmap_open(struct vm_area_struct *vma)
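
Two details of the vmalloc backend introduced above are worth spelling out. First, the free path goes through a workqueue because call_rcu() callbacks run in atomic context while vfree() may sleep. Second, because everything lives in one contiguous vmalloc_user() allocation, the pgoff arithmetic of both perf_mmap_to_page() variants lands on the same byte; a small user-space analogue (4 KiB pages assumed, plain calloc standing in for vmalloc_user):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL  /* assumption: 4 KiB pages */

    int main(void)
    {
        unsigned long nr_pages = 8; /* power of two, as perf_mmap() requires */
        /* one contiguous buffer: control page + 2^n data pages */
        char *all_buf = calloc(nr_pages + 1, PAGE_SIZE);
        char *user_page = all_buf;          /* pgoff 0 */
        char *data0 = all_buf + PAGE_SIZE;  /* data_pages[0] */
        unsigned long pgoff = 3;

        /* vmalloc variant: user_page + pgoff * PAGE_SIZE ... */
        void *a = user_page + pgoff * PAGE_SIZE;
        /* ... page variant: data_pages[0] + (pgoff - 1) * PAGE_SIZE */
        void *b = data0 + (pgoff - 1) * PAGE_SIZE;

        printf("%d\n", a == b); /* prints 1 */
        free(all_buf);
        return 0;
    }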
@@ -2244,11 +2345,12 @@ static void perf_mmap_close(struct vm_area_struct *vma)
WARN_ON_ONCE(event->ctx->parent_ctx);
if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
+ unsigned long size = perf_data_size(event->data);
struct user_struct *user = current_user();
- atomic_long_sub(event->data->nr_pages + 1, &user->locked_vm);
+ atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
vma->vm_mm->locked_vm -= event->data->nr_locked;
- perf_mmap_data_free(event);
+ perf_mmap_data_release(event);
mutex_unlock(&event->mmap_mutex);
}
}
@@ -2266,6 +2368,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
unsigned long locked, lock_limit;
+ struct perf_mmap_data *data;
unsigned long vma_size;
unsigned long nr_pages;
long user_extra, extra;
@@ -2328,10 +2431,15 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
}
WARN_ON(event->data);
- ret = perf_mmap_data_alloc(event, nr_pages);
- if (ret)
+
+ data = perf_mmap_data_alloc(event, nr_pages);
+ ret = -ENOMEM;
+ if (!data)
goto unlock;
+ ret = 0;
+ perf_mmap_data_init(event, data);
+
atomic_set(&event->mmap_count, 1);
atomic_long_add(user_extra, &user->locked_vm);
vma->vm_mm->locked_vm += extra;
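
From user space, the contract this function enforces is one control page plus a power-of-two number of data pages. A minimal sketch of a consumer (error handling trimmed; perf_event_open() has no libc wrapper, so it goes through syscall(2)):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr attr;
        long page_size = sysconf(_SC_PAGESIZE);
        int nr_pages = 8; /* must be a power of two */
        int fd;
        void *buf;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
            return 1;

        /* one struct perf_event_mmap_page + 2^n data pages */
        buf = mmap(NULL, (nr_pages + 1) * page_size,
                   PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (buf == MAP_FAILED)
            return 1;

        munmap(buf, (nr_pages + 1) * page_size);
        close(fd);
        return 0;
    }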
@@ -2519,7 +2627,7 @@ static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
if (!data->writable)
return true;
- mask = (data->nr_pages << PAGE_SHIFT) - 1;
+ mask = perf_data_size(data) - 1;
offset = (offset - tail) & mask;
head = (head - tail) & mask;
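
The mask trick above only works because the buffer size is always a power of two (perf_mmap() rejects anything else), so size - 1 is an all-ones mask and distances between free-running counters come out right without a division. Runnable demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long size = 1UL << 15;  /* 32 KiB, a power of two */
        unsigned long mask = size - 1;
        unsigned long tail = 40000, head = 70000; /* free-running counters */

        printf("head pos %lu, tail pos %lu\n", head & mask, tail & mask);
        /* bytes in flight; correct even though head has wrapped past tail */
        printf("used %lu\n", (head - tail) & mask); /* 30000 */
        return 0;
    }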
@@ -2624,7 +2732,7 @@ void perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len)
{
unsigned int pages_mask;
- unsigned int offset;
+ unsigned long offset;
unsigned int size;
void **pages;
@@ -2633,12 +2741,14 @@ void perf_output_copy(struct perf_output_handle *handle,
pages = handle->data->data_pages;
do {
- unsigned int page_offset;
+ unsigned long page_offset;
+ unsigned long page_size;
int nr;
nr = (offset >> PAGE_SHIFT) & pages_mask;
- page_offset = offset & (PAGE_SIZE - 1);
- size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
+ page_size = 1UL << (handle->data->data_order + PAGE_SHIFT);
+ page_offset = offset & (page_size - 1);
+ size = min_t(unsigned int, page_size - page_offset, len);
memcpy(pages[nr] + page_offset, buf, size);
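
The copy granule is now 2^data_order pages rather than a single page: with the page backend data_order is 0 and nothing changes, while with the vmalloc backend pages_mask is 0 and the lone pages[] entry covers the whole buffer. A toy user-space version of the loop (4 KiB pages assumed) that can be stepped through:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SHIFT 12 /* assumption: 4 KiB pages */

    static void copy_out(void **pages, unsigned int pages_mask, int data_order,
                         unsigned long offset, const void *buf, unsigned int len)
    {
        do {
            unsigned long page_size = 1UL << (data_order + PAGE_SHIFT);
            unsigned long page_offset = offset & (page_size - 1);
            int nr = (offset >> PAGE_SHIFT) & pages_mask;
            unsigned int size = len;

            if (size > page_size - page_offset)
                size = page_size - page_offset;

            memcpy((char *)pages[nr] + page_offset, buf, size);

            len -= size;
            buf = (const char *)buf + size;
            offset += size;
        } while (len);
    }

    int main(void)
    {
        static char p0[4096], p1[4096];
        void *pages[2] = { p0, p1 };
        char msg[100];

        memset(msg, 'x', sizeof(msg));
        /* starts 6 bytes before the end of page 0, spills into page 1 */
        copy_out(pages, 1, 0, 4090, msg, sizeof(msg));
        printf("%c %c\n", p0[4095], p1[93]); /* x x */
        return 0;
    }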
@@ -4781,9 +4891,7 @@ int perf_event_init_task(struct task_struct *child)
* We don't have to disable NMIs - we are only looking at
* the list, not manipulating it:
*/
- list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) {
- if (event != event->group_leader)
- continue;
+ list_for_each_entry(event, &parent_ctx->group_list, group_entry) {
if (!event->attr.inherit) {
inherited_all = 0;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e0f59a21c061..89aed5933ed4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -231,6 +231,13 @@ void tick_nohz_stop_sched_tick(int inidle)
if (!inidle && !ts->inidle)
goto end;
+ /*
+ * Set ts->inidle unconditionally. Even if the system did not
+ * switch to NOHZ mode, the cpu frequency governors rely on the
+ * update of the idle time accounting in tick_nohz_start_idle().
+ */
+ ts->inidle = 1;
+
now = tick_nohz_start_idle(ts);
/*
@@ -248,8 +255,6 @@ void tick_nohz_stop_sched_tick(int inidle)
if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
goto end;
- ts->inidle = 1;
-
if (need_resched())
goto end;
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 3eb159c277c8..d9d6206e0b14 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -856,6 +856,37 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
}
/**
+ * blk_add_trace_rq_remap - Add a trace for a request-remap operation
+ * @q: queue the io is for
+ * @rq: the source request
+ * @dev: target device
+ * @from: source sector
+ *
+ * Description:
+ * Device mapper remaps request to other devices.
+ * Add a trace for that action.
+ *
+ **/
+static void blk_add_trace_rq_remap(struct request_queue *q,
+ struct request *rq, dev_t dev,
+ sector_t from)
+{
+ struct blk_trace *bt = q->blk_trace;
+ struct blk_io_trace_remap r;
+
+ if (likely(!bt))
+ return;
+
+ r.device_from = cpu_to_be32(dev);
+ r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
+ r.sector_from = cpu_to_be64(from);
+
+ __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+ rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
+ sizeof(r), &r);
+}
+
+/**
* blk_add_driver_data - Add binary message with driver-specific data
* @q: queue the io is for
* @rq: io request
@@ -922,10 +953,13 @@ static void blk_register_tracepoints(void)
WARN_ON(ret);
ret = register_trace_block_remap(blk_add_trace_remap);
WARN_ON(ret);
+ ret = register_trace_block_rq_remap(blk_add_trace_rq_remap);
+ WARN_ON(ret);
}
static void blk_unregister_tracepoints(void)
{
+ unregister_trace_block_rq_remap(blk_add_trace_rq_remap);
unregister_trace_block_remap(blk_add_trace_remap);
unregister_trace_block_split(blk_add_trace_split);
unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
@@ -1657,6 +1691,11 @@ int blk_trace_init_sysfs(struct device *dev)
return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}
+void blk_trace_remove_sysfs(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
+}
+
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_EVENT_TRACING
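
Since the remap payload is packed big-endian (the cpu_to_be32()/cpu_to_be64() calls above), a user-space reader has to convert it back. A hedged decoding sketch; the struct here is a stand-in mirroring blk_io_trace_remap, with field names taken from the code above:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct remap_payload {       /* stand-in for blk_io_trace_remap */
        uint32_t device_from;    /* __be32 on the wire */
        uint32_t device_to;      /* __be32 on the wire */
        uint64_t sector_from;    /* __be64 on the wire */
    };

    int main(void)
    {
        struct remap_payload r = {
            .device_from = htobe32(0x800010), /* sample dev_t values */
            .device_to   = htobe32(0x800000),
            .sector_from = htobe64(2048),
        };

        printf("remap: 0x%x -> 0x%x, sector %llu\n",
               be32toh(r.device_from), be32toh(r.device_to),
               (unsigned long long)be64toh(r.sector_from));
        return 0;
    }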
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f136fe5da07a..37ba67e33265 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -225,7 +225,11 @@ static void ftrace_update_pid_func(void)
if (ftrace_trace_function == ftrace_stub)
return;
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
func = ftrace_trace_function;
+#else
+ func = __ftrace_trace_function;
+#endif
if (ftrace_pid_trace) {
set_ftrace_pid_function(func);
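
Context for the #ifdef: when an architecture cannot test function_trace_stop in its mcount stub, the stub calls a C-level wrapper and the real callback sits in __ftrace_trace_function, which is why the PID-filter update must read that variable instead. The wrapper looks roughly like this (reconstructed from kernel/trace/ftrace.c of this era, details approximate):

    static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
    {
        if (function_trace_stop)
            return;

        __ftrace_trace_function(ip, parent_ip);
    }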
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 81b1645c8549..a91da69f153a 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void)
return 1;
}
- if (!register_tracer(&kmem_tracer)) {
+ if (register_tracer(&kmem_tracer) != 0) {
pr_warning("Warning: could not register the kmem tracer\n");
return 1;
}
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 9fbce6c9d2e1..527e17eae575 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -166,7 +166,7 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
SYSCALL_FIELD(int, nr),
- SYSCALL_FIELD(unsigned long, ret));
+ SYSCALL_FIELD(long, ret));
if (!ret)
return 0;
@@ -212,7 +212,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
if (ret)
return ret;
- ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0,
+ ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0,
FILTER_OTHER);
return ret;
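
The signedness matters because syscalls report errors as negative errno values; declaring the field unsigned long makes filters and formatted output see a huge positive number instead. A two-line illustration:

    #include <stdio.h>

    int main(void)
    {
        long ret = -2; /* e.g. a syscall returning -ENOENT */

        printf("%ld\n", ret);                /* -2 */
        printf("%lu\n", (unsigned long)ret); /* 18446744073709551614 on 64-bit */
        return 0;
    }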