author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2015-08-16 14:06:07 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2015-08-16 14:06:07 +0200
commit     763602962f31c587138c0edda7364807b3321bba (patch)
tree       84167cb01577788710a59482d376a59fc3183e3a /kernel
parent     0917f823c59692d751951bf5ea699a2d1e2f26a2 (diff)
parent     352cb8677f83a6cf2139151578c8c79785d2d4bf (diff)
Merge tag 'v4.1.5' into linux-4.1.y-rt
This is the 4.1.5 stable release
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/irq/resend.c                 | 18
 -rw-r--r--  kernel/power/Kconfig                |  2
 -rw-r--r--  kernel/printk/printk.c              | 11
 -rw-r--r--  kernel/trace/ftrace.c               | 52
 -rw-r--r--  kernel/trace/trace.h                |  3
 -rw-r--r--  kernel/trace/trace_branch.c         | 17
 -rw-r--r--  kernel/trace/trace_events_filter.c  | 10
 7 files changed, 73 insertions(+), 40 deletions(-)
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 9065107f083e..7a5237a1bce5 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -75,13 +75,21 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
!desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
#ifdef CONFIG_HARDIRQS_SW_RESEND
/*
- * If the interrupt has a parent irq and runs
- * in the thread context of the parent irq,
- * retrigger the parent.
+ * If the interrupt is running in the thread
+ * context of the parent irq we need to be
+ * careful, because we cannot trigger it
+ * directly.
*/
- if (desc->parent_irq &&
- irq_settings_is_nested_thread(desc))
+ if (irq_settings_is_nested_thread(desc)) {
+ /*
+ * If the parent_irq is valid, we
+ * retrigger the parent, otherwise we
+ * do nothing.
+ */
+ if (!desc->parent_irq)
+ return;
irq = desc->parent_irq;
+ }
/* Set it pending and activate the softirq: */
set_bit(irq, irqs_resend);
tasklet_schedule(&resend_tasklet);
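
Note on the resend change above: a nested-thread interrupt cannot be retriggered directly, so the software resend now bails out entirely when such an irq has no valid parent_irq, instead of falling through and resending the nested irq itself. A minimal userspace C sketch of the resulting control flow (the struct fields and helper below are simplified stand-ins for the kernel's irq_desc and irq_settings_is_nested_thread(), for illustration only):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's irq_desc. */
struct irq_desc {
	unsigned int parent_irq;   /* 0 means "no parent" */
	bool nested_thread;        /* irq_settings_is_nested_thread() */
};

/* Return the irq that should actually be marked pending,
 * or 0 if the resend must be skipped entirely. */
static unsigned int resend_target(struct irq_desc *desc, unsigned int irq)
{
	if (desc->nested_thread) {
		/* A nested-thread irq cannot be retriggered directly;
		 * only its parent can. Without a parent, do nothing. */
		if (!desc->parent_irq)
			return 0;
		irq = desc->parent_irq;
	}
	return irq;
}

int main(void)
{
	struct irq_desc orphan = { .parent_irq = 0, .nested_thread = true };
	struct irq_desc child  = { .parent_irq = 7, .nested_thread = true };
	struct irq_desc plain  = { .parent_irq = 0, .nested_thread = false };

	printf("orphan -> %u\n", resend_target(&orphan, 42)); /* 0: skipped */
	printf("child  -> %u\n", resend_target(&child, 42));  /* 7: parent  */
	printf("plain  -> %u\n", resend_target(&plain, 42));  /* 42: itself */
	return 0;
}

With this shape, the only irq ever marked pending is either the original non-nested irq or a valid parent.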
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 7e01f78f0417..9e302315e33d 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -187,7 +187,7 @@ config DPM_WATCHDOG
config DPM_WATCHDOG_TIMEOUT
int "Watchdog timeout in seconds"
range 1 120
- default 12
+ default 60
depends on DPM_WATCHDOG
config PM_TRACE
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 6062a0442f05..c2d5877aed06 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -484,11 +484,11 @@ int check_syslog_permissions(int type, bool from_file)
* already done the capabilities checks at open time.
*/
if (from_file && type != SYSLOG_ACTION_OPEN)
- return 0;
+ goto ok;
if (syslog_action_restricted(type)) {
if (capable(CAP_SYSLOG))
- return 0;
+ goto ok;
/*
* For historical reasons, accept CAP_SYS_ADMIN too, with
* a warning.
@@ -498,10 +498,11 @@ int check_syslog_permissions(int type, bool from_file)
"CAP_SYS_ADMIN but no CAP_SYSLOG "
"(deprecated).\n",
current->comm, task_pid_nr(current));
- return 0;
+ goto ok;
}
return -EPERM;
}
+ok:
return security_syslog(type);
}
@@ -1288,10 +1289,6 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
if (error)
goto out;
- error = security_syslog(type);
- if (error)
- return error;
-
switch (type) {
case SYSLOG_ACTION_CLOSE: /* Close log */
break;
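
Note on the printk change above: every grant path in check_syslog_permissions() now funnels through a single ok: label so that security_syslog() runs exactly once per check, and the duplicate call in do_syslog() is removed. A rough standalone C model of the resulting flow (the capability checks and the LSM hook are stubbed out; names follow the kernel for readability, not its actual API):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum { SYSLOG_ACTION_OPEN = 1, SYSLOG_ACTION_READ_ALL = 3 };

/* Stubs standing in for the kernel's checks and the LSM hook. */
static bool syslog_action_restricted(int type) { return type != SYSLOG_ACTION_OPEN; }
static bool capable_syslog(void)    { return false; }
static bool capable_sys_admin(void) { return true; }
static int  security_syslog(int type) { (void)type; return 0; }

static int check_syslog_permissions(int type, bool from_file)
{
	/* /proc/kmsg readers did their capability checks at open time. */
	if (from_file && type != SYSLOG_ACTION_OPEN)
		goto ok;

	if (syslog_action_restricted(type)) {
		if (capable_syslog())
			goto ok;
		/* Historical fallback: CAP_SYS_ADMIN still accepted
		 * (deprecated, warned about in the kernel). */
		if (capable_sys_admin())
			goto ok;
		return -EPERM;
	}
ok:
	/* Every grant path ends here, so the security hook
	 * runs exactly once per check. */
	return security_syslog(type);
}

int main(void)
{
	printf("%d\n", check_syslog_permissions(SYSLOG_ACTION_READ_ALL, false));
	return 0;
}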
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 02bece4a99ea..eb11011b5292 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -98,6 +98,13 @@ struct ftrace_pid {
struct pid *pid;
};
+static bool ftrace_pids_enabled(void)
+{
+ return !list_empty(&ftrace_pids);
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
/*
* ftrace_disabled is set when an anomaly is discovered.
* ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
if (!test_tsk_trace_trace(current))
return;
- ftrace_pid_function(ip, parent_ip, op, regs);
-}
-
-static void set_ftrace_pid_function(ftrace_func_t func)
-{
- /* do not set ftrace_pid_function to itself! */
- if (func != ftrace_pid_func)
- ftrace_pid_function = func;
+ op->saved_func(ip, parent_ip, op, regs);
}
/**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
void clear_ftrace_function(void)
{
ftrace_trace_function = ftrace_stub;
- ftrace_pid_function = ftrace_stub;
}
static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
} else
add_ftrace_ops(&ftrace_ops_list, ops);
+ /* Always save the function, and reset at unregistering */
+ ops->saved_func = ops->func;
+
+ if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+ ops->func = ftrace_pid_func;
+
ftrace_update_trampoline(ops);
if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
if (ftrace_enabled)
update_ftrace_function();
+ ops->func = ops->saved_func;
+
return 0;
}
static void ftrace_update_pid_func(void)
{
+ bool enabled = ftrace_pids_enabled();
+ struct ftrace_ops *op;
+
/* Only do something if we are tracing something */
if (ftrace_trace_function == ftrace_stub)
return;
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+ if (op->flags & FTRACE_OPS_FL_PID) {
+ op->func = enabled ? ftrace_pid_func :
+ op->saved_func;
+ ftrace_update_trampoline(op);
+ }
+ } while_for_each_ftrace_op(op);
+
update_ftrace_function();
}
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
.local_hash.filter_hash = EMPTY_HASH,
INIT_OPS_HASH(global_ops)
.flags = FTRACE_OPS_FL_RECURSION_SAFE |
- FTRACE_OPS_FL_INITIALIZED,
+ FTRACE_OPS_FL_INITIALIZED |
+ FTRACE_OPS_FL_PID,
};
/*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
static struct ftrace_ops global_ops = {
.func = ftrace_stub,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE |
+ FTRACE_OPS_FL_INITIALIZED |
+ FTRACE_OPS_FL_PID,
};
static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
if (WARN_ON(tr->ops->func != ftrace_stub))
printk("ftrace ops had %pS for function\n",
tr->ops->func);
- /* Only the top level instance does pid tracing */
- if (!list_empty(&ftrace_pids)) {
- set_ftrace_pid_function(func);
- func = ftrace_pid_func;
- }
}
tr->ops->func = func;
tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
{
mutex_lock(&ftrace_lock);
- if (list_empty(&ftrace_pids) && (!*pos))
+ if (!ftrace_pids_enabled() && (!*pos))
return (void *) 1;
return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
.func = ftrace_stub,
.flags = FTRACE_OPS_FL_RECURSION_SAFE |
FTRACE_OPS_FL_INITIALIZED |
+ FTRACE_OPS_FL_PID |
FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
.trampoline = FTRACE_GRAPH_TRAMP_ADDR,
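
Note on the ftrace changes above: the single global ftrace_pid_function is replaced by a per-ops saved_func. Registration always stashes the real callback; while pid filtering is enabled, any ops flagged FTRACE_OPS_FL_PID has its func swapped to ftrace_pid_func, which filters and then forwards to saved_func, and unregistering restores the original. A compact userspace sketch of that swap (list walking, locking, and trampoline updates are omitted; this illustrates the mechanism, not the kernel interface):

#include <stdbool.h>
#include <stdio.h>

#define FTRACE_OPS_FL_PID 0x1

struct ftrace_ops;
typedef void (*ftrace_func_t)(struct ftrace_ops *ops);

struct ftrace_ops {
	ftrace_func_t func;        /* callback actually invoked */
	ftrace_func_t saved_func;  /* real callback, saved at register time */
	unsigned int flags;
};

static void real_callback(struct ftrace_ops *ops) { (void)ops; puts("traced"); }

/* Wrapper: would filter by pid, then forward to the saved callback. */
static void ftrace_pid_func(struct ftrace_ops *ops)
{
	/* pid check elided */
	ops->saved_func(ops);
}

static bool pids_enabled;

static void register_ops(struct ftrace_ops *ops)
{
	ops->saved_func = ops->func;   /* always save; restored on unregister */
	if ((ops->flags & FTRACE_OPS_FL_PID) && pids_enabled)
		ops->func = ftrace_pid_func;
}

/* Models the loop in ftrace_update_pid_func() for a single ops. */
static void update_pid_func(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_PID)
		ops->func = pids_enabled ? ftrace_pid_func : ops->saved_func;
}

int main(void)
{
	struct ftrace_ops ops = { .func = real_callback, .flags = FTRACE_OPS_FL_PID };

	register_ops(&ops);
	ops.func(&ops);          /* pids off: real_callback directly */

	pids_enabled = true;
	update_pid_func(&ops);
	ops.func(&ops);          /* pids on: ftrace_pid_func -> real_callback */
	return 0;
}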
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index a6784ca9cd74..c0f3c568cac8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -446,6 +446,7 @@ enum {
TRACE_CONTROL_BIT,
+ TRACE_BRANCH_BIT,
/*
* Abuse of the trace_recursion.
* As we need a way to maintain state if we are tracing the function
@@ -1314,7 +1315,7 @@ void trace_event_init(void);
void trace_event_enum_update(struct trace_enum_map **map, int len);
#else
static inline void __init trace_event_init(void) { }
-static inlin void trace_event_enum_update(struct trace_enum_map **map, int len) { }
+static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
#endif
extern struct trace_iterator *tracepoint_print_iter;
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 57cbf1efdd44..1879980f06c2 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -36,9 +36,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
struct trace_branch *entry;
struct ring_buffer *buffer;
unsigned long flags;
- int cpu, pc;
+ int pc;
const char *p;
+ if (current->trace_recursion & TRACE_BRANCH_BIT)
+ return;
+
/*
* I would love to save just the ftrace_likely_data pointer, but
* this code can also be used by modules. Ugly things can happen
@@ -49,10 +52,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
if (unlikely(!tr))
return;
- local_irq_save(flags);
- cpu = raw_smp_processor_id();
- data = per_cpu_ptr(tr->trace_buffer.data, cpu);
- if (atomic_inc_return(&data->disabled) != 1)
+ raw_local_irq_save(flags);
+ current->trace_recursion |= TRACE_BRANCH_BIT;
+ data = this_cpu_ptr(tr->trace_buffer.data);
+ if (atomic_read(&data->disabled))
goto out;
pc = preempt_count();
@@ -81,8 +84,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
__buffer_unlock_commit(buffer, event);
out:
- atomic_dec(&data->disabled);
- local_irq_restore(flags);
+ current->trace_recursion &= ~TRACE_BRANCH_BIT;
+ raw_local_irq_restore(flags);
}
static inline
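
Note on the trace_branch change above: instead of bumping the per-cpu disabled counter, the probe now sets a per-task recursion bit on entry and clears it on exit, so re-entering the probe from inside itself (e.g. via an annotated branch in the ring-buffer path) becomes a no-op. A standalone C sketch of the pattern, with a thread-local flag standing in for current->trace_recursion (illustrative only):

#include <stdio.h>

#define TRACE_BRANCH_BIT 0x1

/* Stand-in for current->trace_recursion (per-task in the kernel,
 * thread-local here). */
static _Thread_local unsigned long trace_recursion;

static void record_branch(int val, int expect);

static void probe_likely_condition(int val, int expect)
{
	/* Already inside the probe on this task: refuse to recurse. */
	if (trace_recursion & TRACE_BRANCH_BIT)
		return;

	trace_recursion |= TRACE_BRANCH_BIT;
	record_branch(val, expect);          /* may hit annotated branches */
	trace_recursion &= ~TRACE_BRANCH_BIT;
}

static void record_branch(int val, int expect)
{
	/* Imagine this path contains a likely()/unlikely() annotation:
	 * re-entering probe_likely_condition() here is now a no-op. */
	probe_likely_condition(val, expect);
	printf("branch: val=%d expect=%d\n", val, expect);
}

int main(void)
{
	probe_likely_condition(1, 1);   /* recorded exactly once */
	return 0;
}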
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 7f2e97ce71a7..52adf02d7619 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1056,6 +1056,9 @@ static void parse_init(struct filter_parse_state *ps,
static char infix_next(struct filter_parse_state *ps)
{
+ if (!ps->infix.cnt)
+ return 0;
+
ps->infix.cnt--;
return ps->infix.string[ps->infix.tail++];
@@ -1071,6 +1074,9 @@ static char infix_peek(struct filter_parse_state *ps)
static void infix_advance(struct filter_parse_state *ps)
{
+ if (!ps->infix.cnt)
+ return;
+
ps->infix.cnt--;
ps->infix.tail++;
}
@@ -1385,7 +1391,9 @@ static int check_preds(struct filter_parse_state *ps)
if (elt->op != OP_NOT)
cnt--;
n_normal_preds++;
- WARN_ON_ONCE(cnt < 0);
+ /* all ops should have operands */
+ if (cnt < 0)
+ break;
}
if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
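
Note on the filter-parser change above: infix_next() and infix_advance() gain underflow guards so the cursor can no longer walk past the end of the token string once cnt reaches zero, and check_preds() now stops on an operator that is missing an operand rather than only warning. A small standalone model of the guarded cursor (struct layout simplified; names mirror the kernel):

#include <stdio.h>

struct infix_state {
	const char *string;
	unsigned int cnt;   /* characters remaining */
	unsigned int tail;  /* next read position */
};

/* Return the next character, or 0 once the input is exhausted:
 * the guard keeps cnt from wrapping below zero. */
static char infix_next(struct infix_state *ps)
{
	if (!ps->cnt)
		return 0;
	ps->cnt--;
	return ps->string[ps->tail++];
}

static void infix_advance(struct infix_state *ps)
{
	if (!ps->cnt)
		return;
	ps->cnt--;
	ps->tail++;
}

int main(void)
{
	struct infix_state ps = { .string = "a==1", .cnt = 4, .tail = 0 };
	char c;

	while ((c = infix_next(&ps)))
		putchar(c);
	putchar('\n');

	infix_advance(&ps);   /* exhausted: safely a no-op now */
	printf("cnt=%u tail=%u\n", ps.cnt, ps.tail);
	return 0;
}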